"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
    "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
    "xlm-roberta-large-finetuned-conll02-dutch": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll02-spanish": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll03-english": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll03-german": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
    ),
}
class XLMRobertaConfig(PretrainedConfig):
    model_type = "xlm-roberta"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
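# A minimal usage sketch, added for illustration (assumption: the public
# `transformers` package is installed; this block is not part of the original module).
if __name__ == "__main__":
    from transformers import XLMRobertaConfig, XLMRobertaModel

    config = XLMRobertaConfig(hidden_size=384, num_hidden_layers=6, num_attention_heads=6)
    model = XLMRobertaModel(config)  # randomly initialized model with the custom shape
    print(model.config.hidden_size)  # 384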
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import MobileNetV1ImageProcessor
class MobileNetV1ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }


@require_torch
@require_vision
class MobileNetV1ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileNetV1ImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetV1ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
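# A short usage sketch of the processor under test, added for illustration
# (assumption: `transformers` and `Pillow` are installed; not part of the test file).
if __name__ == "__main__":
    processor = MobileNetV1ImageProcessor(size={"shortest_edge": 20}, crop_size={"height": 18, "width": 18})
    sample = Image.fromarray(np.random.randint(0, 256, (64, 48, 3), dtype=np.uint8))
    pixel_values = processor(sample, return_tensors="pt").pixel_values
    print(pixel_values.shape)  # torch.Size([1, 3, 18, 18])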
"""simple docstring"""
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

from utils import (
    Seq2SeqDataset,
    calculate_bleu,
    calculate_rouge,
    chunks,
    lmap,
    load_json,
    parse_numeric_n_bool_cl_kwargs,
    save_json,
    use_task_specific_params,
    write_txt_file,
)

logger = getLogger(__name__)
def eval_data_dir(
    data_dir,
    save_dir: str,
    model_name: str,
    bs: int = 8,
    max_source_length: int = 1024,
    type_path="val",
    n_obs=None,
    fp16=False,
    task="summarization",
    local_rank=None,
    num_return_sequences=1,
    dataset_kwargs: Dict = None,
    prefix="",
    **generate_kwargs,
) -> Dict:
    """Run evaluation on part of the data for one gpu and save to {save_dir}/rank_{rank}_output.json"""
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl", rank=local_rank)

    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(f"rank_{local_rank}_output.json")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams", model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    ds = Seq2SeqDataset(
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length=1024,
        type_path=type_path,
        n_obs=n_obs,
        prefix=prefix,
        **dataset_kwargs,
    )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch["input_ids"].to(model.device),
            attention_mask=batch["attention_mask"].to(model.device),
            num_return_sequences=num_return_sequences,
            num_beams=num_beams,
            **generate_kwargs,
        )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch["ids"]
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append(dict(pred=pred, id=ids[i].item()))
    save_json(results, save_path)
    return results, sampler.num_replicas
def run_generate():
    parser = argparse.ArgumentParser(
        epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate"
    )
    parser.add_argument("--data_dir", type=str, help="like cnn_dm/test.source")
    parser.add_argument(
        "--model_name",
        type=str,
        help="like facebook/bart-large-cnn,t5-base, etc.",
        default="sshleifer/distilbart-xsum-12-3",
    )
    parser.add_argument("--save_dir", type=str, help="where to save", default="tmp_gen")
    parser.add_argument("--max_source_length", type=int, default=None)
    parser.add_argument(
        "--type_path", type=str, default="test", help="which subset to evaluate typically train/val/test"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--local_rank", type=int, default=-1, required=False, help="should be passed by distributed.launch"
    )
    parser.add_argument(
        "--n_obs", type=int, default=None, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument(
        "--num_return_sequences", type=int, default=1, required=False, help="How many sequences to return"
    )
    parser.add_argument(
        "--sync_timeout",
        type=int,
        default=600,
        required=False,
        help="How long should master process wait for other processes to finish.",
    )
    parser.add_argument("--src_lang", type=str, default=None, required=False)
    parser.add_argument("--tgt_lang", type=str, default=None, required=False)
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--debug", action="store_true")
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(f"parsed the following generate kwargs: {generate_kwargs}")
    json_save_dir = Path(args.save_dir + "_tmp")
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob("rank_*.json"))
    if intermediate_files:
        raise ValueError(f"Found files at {json_save_dir} please move or remove them.")
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang

    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir,
        json_save_dir,
        args.model_name,
        type_path=args.type_path,
        bs=args.bs,
        fp16=args.fp16,
        task=args.task,
        local_rank=args.local_rank,
        n_obs=args.n_obs,
        max_source_length=args.max_source_length,
        num_return_sequences=args.num_return_sequences,
        prefix=args.prefix,
        dataset_kwargs=dataset_kwargs,
        **generate_kwargs,
    )

    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath("pseudolabel_results.json")
            print(f"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/")
            save_json(preds, save_path)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + ".target")
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]

        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = "translation" in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = "bleu" if calc_bleu else "rouge"
        metrics: Dict = score_fn(preds, labels)
        metrics["n_obs"] = len(preds)
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"], 4)
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json")
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(f"{args.type_path}_generations.txt"))
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(f"{args.type_path}.target"))
    else:
        shutil.rmtree(json_save_dir)
def combine_partial_results(partial_results) -> List:
    """Concatenate partial results into one list of predictions, then sort it by id."""
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x["pred"] for x in records]
    return preds


def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]:
    # wait for all the rank_*.json shard files to appear, then load them
    start_wait = time.time()
    logger.info("waiting for all nodes to finish")
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json"))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError("Rank 0 gave up on waiting for other processes")
    # Unreachable


if __name__ == "__main__":
    # Usage for MT:
    run_generate()
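# A toy illustration of the aggregation step, added for clarity (assumption:
# hand-made records rather than output from a real distributed run).
def _demo_combine_partial_results():
    """
    >>> rank0 = [{"pred": "b", "id": 1}, {"pred": "c", "id": 2}]
    >>> rank1 = [{"pred": "a", "id": 0}]
    >>> combine_partial_results([rank0, rank1])
    ['a', 'b', 'c']
    """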
"""simple docstring"""
def __UpperCAmelCase ( snake_case_ : list ) -> list:
"""simple docstring"""
for i in range(len(snake_case_ ) - 1 , 0 , -1 ):
_lowerCAmelCase = False
for j in range(snake_case_ , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
_lowerCAmelCase , _lowerCAmelCase = unsorted[j - 1], unsorted[j]
_lowerCAmelCase = True
for j in range(snake_case_ ):
if unsorted[j] > unsorted[j + 1]:
_lowerCAmelCase , _lowerCAmelCase = unsorted[j + 1], unsorted[j]
_lowerCAmelCase = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE : List[Any] = input('''Enter numbers separated by a comma:\n''').strip()
SCREAMING_SNAKE_CASE : List[str] = [int(item) for item in user_input.split(''',''')]
print(F'{cocktail_shaker_sort(unsorted) = }')
"""simple docstring"""
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)


@dataclass
class GlueDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify
    them on the command line.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        self.task_name = self.task_name.lower()
class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]

    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
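# A minimal usage sketch, added for illustration (assumption: `glue_data/MRPC`
# holds the MRPC .tsv files; this module is only importable from within the
# `transformers` package, so the block is illustrative rather than standalone).
if __name__ == "__main__":
    from transformers import AutoTokenizer, GlueDataset, GlueDataTrainingArguments

    data_args = GlueDataTrainingArguments(task_name="mrpc", data_dir="glue_data/MRPC", max_seq_length=128)
    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    train_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="train")
    print(len(train_dataset), train_dataset[0])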
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_py3nvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
    import tensorflow as tf
    from tensorflow.python.framework.errors_impl import ResourceExhaustedError

    from .benchmark_args_tf import TensorFlowBenchmarkArguments

if is_py3nvml_available():
    import py3nvml.py3nvml as nvml

logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func


def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ["tf.Tensor"]:
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__

    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference
    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train
    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )

                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")

    def _measure_memory(self, func) -> [Memory, MemorySummary]:
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_py3nvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
"""simple docstring"""
def __UpperCAmelCase ( snake_case_ : int ) -> int:
"""simple docstring"""
if n == 1 or not isinstance(snake_case_ , snake_case_ ):
return 0
elif n == 2:
return 1
else:
_lowerCAmelCase = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def __UpperCAmelCase ( snake_case_ : int ) -> int:
"""simple docstring"""
_lowerCAmelCase = 0
_lowerCAmelCase = 2
while digits < n:
index += 1
_lowerCAmelCase = len(str(fibonacci(snake_case_ ) ) )
return index
def __UpperCAmelCase ( snake_case_ : int = 1000 ) -> int:
"""simple docstring"""
return fibonacci_digits_index(snake_case_ )
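# A closed-form cross-check via Binet's formula, added for illustration (an
# assumption of this edit, not part of the original solution): F(k) ~ phi**k / sqrt(5),
# so the first index with n digits can be computed directly instead of iteratively.
def solution_closed_form(n: int = 1000) -> int:
    """
    >>> solution_closed_form(3)
    12
    >>> solution_closed_form(1000)
    4782
    """
    import math

    phi = (1 + math.sqrt(5)) / 2
    return math.ceil((n - 1 + math.log10(5) / 2) / math.log10(phi))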
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}


class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)


class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
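# A short sketch of backbone stage selection, added for illustration
# (assumption: the public `transformers` package is installed).
if __name__ == "__main__":
    from transformers import MaskFormerSwinConfig

    config = MaskFormerSwinConfig(out_features=["stage2", "stage4"])
    print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
    print(config.out_features)  # ['stage2', 'stage4']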
"""simple docstring"""
import math
def __UpperCAmelCase ( snake_case_ : int ) -> list[int]:
"""simple docstring"""
_lowerCAmelCase = []
_lowerCAmelCase = 2
_lowerCAmelCase = int(math.sqrt(snake_case_ ) ) # Size of every segment
_lowerCAmelCase = [True] * (end + 1)
_lowerCAmelCase = []
while start <= end:
if temp[start] is True:
in_prime.append(snake_case_ )
for i in range(start * start , end + 1 , snake_case_ ):
_lowerCAmelCase = False
start += 1
prime += in_prime
_lowerCAmelCase = end + 1
_lowerCAmelCase = min(2 * end , snake_case_ )
while low <= n:
_lowerCAmelCase = [True] * (high - low + 1)
for each in in_prime:
_lowerCAmelCase = math.floor(low / each ) * each
if t < low:
t += each
for j in range(snake_case_ , high + 1 , snake_case_ ):
_lowerCAmelCase = False
for j in range(len(snake_case_ ) ):
if temp[j] is True:
prime.append(j + low )
_lowerCAmelCase = high + 1
_lowerCAmelCase = min(high + end , snake_case_ )
return prime
print(sieve(1_0**6))
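# A small sanity check, added for illustration (not part of the original script):
# the segmented sieve should agree with naive trial division on a small range.
def _is_prime(k: int) -> bool:
    return k >= 2 and all(k % d != 0 for d in range(2, int(math.sqrt(k)) + 1))


assert sieve(100) == [k for k in range(2, 101) if _is_prime(k)]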
"""simple docstring"""
SCREAMING_SNAKE_CASE : dict[str, float] = {
"joule": 1.0,
"kilojoule": 1_0_0_0,
"megajoule": 1_0_0_0_0_0_0,
"gigajoule": 1_0_0_0_0_0_0_0_0_0,
"wattsecond": 1.0,
"watthour": 3_6_0_0,
"kilowatthour": 3_6_0_0_0_0_0,
"newtonmeter": 1.0,
"calorie_nutr": 4_1_8_6.8,
"kilocalorie_nutr": 4_1_8_6_8_0_0.0_0,
"electronvolt": 1.6_02_17_66_34e-19,
"britishthermalunit_it": 1_0_5_5.0_5_5_8_5,
"footpound": 1.3_5_5_8_1_8,
}
def __UpperCAmelCase ( snake_case_ : str , snake_case_ : str , snake_case_ : float ) -> float:
"""simple docstring"""
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
_lowerCAmelCase = (
F"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
F"""Valid values are: {", ".join(snake_case_ )}"""
)
raise ValueError(snake_case_ )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parrameters
SCREAMING_SNAKE_CASE : Any = (7_2_0, 1_2_8_0) # Height, Width
SCREAMING_SNAKE_CASE : List[str] = (0.4, 0.6) # if height or width lower than this scale, drop it.
SCREAMING_SNAKE_CASE : List[Any] = 1 / 1_0_0
SCREAMING_SNAKE_CASE : Optional[Any] = ''''''
SCREAMING_SNAKE_CASE : Dict = ''''''
SCREAMING_SNAKE_CASE : List[Any] = ''''''
SCREAMING_SNAKE_CASE : Dict = 2_5_0
def main() -> None:
    """
    Get images list and annotations list from input dir.
    Update new images and annotations.
    Save images and annotations in output dir.
    """
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )

        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """Read YOLO-format label files and pair each with its image path."""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            # convert center/size format to corner coordinates
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2

            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(
    all_img_list: list,
    all_annos: list,
    idxs: list[int],
    output_size: tuple[int, int],
    scale_range: tuple[float, float],
    filter_scale: float = 0.0,
) -> tuple[list, list, str]:
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding box smaller than the filter scale
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]
def random_chars(number_char: int) -> str:
    """
    Automatic generate random 32 characters.
    Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
    >>> len(random_chars(32))
    32
    """
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
"""simple docstring"""
from __future__ import annotations
class __lowerCamelCase :
def __init__(self , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase = text, pattern
_lowerCAmelCase , _lowerCAmelCase = len(lowerCamelCase ), len(lowerCamelCase )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def A__ (self , lowerCamelCase ):
'''simple docstring'''
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = []
for i in range(self.textLen - self.patLen + 1 ):
_lowerCAmelCase = self.mismatch_in_text(lowerCamelCase )
if mismatch_index == -1:
positions.append(lowerCamelCase )
else:
_lowerCAmelCase = self.match_in_pattern(self.text[mismatch_index] )
_lowerCAmelCase = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
SCREAMING_SNAKE_CASE : Any = '''ABAABA'''
SCREAMING_SNAKE_CASE : Optional[int] = '''AB'''
SCREAMING_SNAKE_CASE : str = BoyerMooreSearch(text, pattern)
SCREAMING_SNAKE_CASE : Tuple = bms.bad_character_heuristic()
if len(positions) == 0:
print('''No match found''')
else:
print('''Pattern found in following positions: ''')
print(positions)
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
SCREAMING_SNAKE_CASE : Dict = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def __UpperCAmelCase ( snake_case_ : Optional[int] ) -> List[str]:
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(snake_case_ )
def __UpperCAmelCase ( snake_case_ : Union[str, Any] ) -> int:
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_terminal_summary_main
_lowerCAmelCase = terminalreporter.config.getoption("""--make-reports""" )
if make_reports:
pytest_terminal_summary_main(snake_case_ , id=snake_case_ )
"""simple docstring"""
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def __UpperCAmelCase ( snake_case_ : Sequence[float] , snake_case_ : int , snake_case_ : int ) -> tuple[int | None, int | None, float]:
"""simple docstring"""
if not arr:
return None, None, 0
if low == high:
return low, high, arr[low]
_lowerCAmelCase = (low + high) // 2
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = max_subarray(snake_case_ , snake_case_ , snake_case_ )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = max_subarray(snake_case_ , mid + 1 , snake_case_ )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = max_cross_sum(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
if left_sum >= right_sum and left_sum >= cross_sum:
return left_low, left_high, left_sum
elif right_sum >= left_sum and right_sum >= cross_sum:
return right_low, right_high, right_sum
return cross_left, cross_right, cross_sum
def __UpperCAmelCase ( snake_case_ : Sequence[float] , snake_case_ : int , snake_case_ : int , snake_case_ : int ) -> tuple[int, int, float]:
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = float("""-inf""" ), -1
_lowerCAmelCase , _lowerCAmelCase = float("""-inf""" ), -1
_lowerCAmelCase = 0
for i in range(snake_case_ , low - 1 , -1 ):
summ += arr[i]
if summ > left_sum:
_lowerCAmelCase = summ
_lowerCAmelCase = i
_lowerCAmelCase = 0
for i in range(mid + 1 , high + 1 ):
summ += arr[i]
if summ > right_sum:
_lowerCAmelCase = summ
_lowerCAmelCase = i
return max_left, max_right, (left_sum + right_sum)
def __UpperCAmelCase ( snake_case_ : int ) -> float:
"""simple docstring"""
_lowerCAmelCase = [randint(1 , snake_case_ ) for _ in range(snake_case_ )]
_lowerCAmelCase = time.time()
max_subarray(snake_case_ , 0 , input_size - 1 )
_lowerCAmelCase = time.time()
return end - start
def __UpperCAmelCase ( ) -> None:
"""simple docstring"""
_lowerCAmelCase = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]
_lowerCAmelCase = [time_max_subarray(snake_case_ ) for input_size in input_sizes]
print("""No of Inputs\t\tTime Taken""" )
for input_size, runtime in zip(snake_case_ , snake_case_ ):
print(snake_case_ , """\t\t""" , snake_case_ )
plt.plot(snake_case_ , snake_case_ )
plt.xlabel("""Number of Inputs""" )
plt.ylabel("""Time taken in seconds""" )
plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


LANGUAGE_CODES = {
'''Acehnese Arabic''': '''ace_Arab''',
'''Acehnese Latin''': '''ace_Latn''',
'''Mesopotamian Arabic''': '''acm_Arab''',
'''Ta\'izzi-Adeni Arabic''': '''acq_Arab''',
'''Tunisian Arabic''': '''aeb_Arab''',
'''Afrikaans''': '''afr_Latn''',
'''South Levantine Arabic''': '''ajp_Arab''',
'''Akan''': '''aka_Latn''',
'''Amharic''': '''amh_Ethi''',
'''North Levantine Arabic''': '''apc_Arab''',
'''Modern Standard Arabic''': '''arb_Arab''',
'''Modern Standard Arabic Romanized''': '''arb_Latn''',
'''Najdi Arabic''': '''ars_Arab''',
'''Moroccan Arabic''': '''ary_Arab''',
'''Egyptian Arabic''': '''arz_Arab''',
'''Assamese''': '''asm_Beng''',
'''Asturian''': '''ast_Latn''',
'''Awadhi''': '''awa_Deva''',
'''Central Aymara''': '''ayr_Latn''',
'''South Azerbaijani''': '''azb_Arab''',
'''North Azerbaijani''': '''azj_Latn''',
'''Bashkir''': '''bak_Cyrl''',
'''Bambara''': '''bam_Latn''',
'''Balinese''': '''ban_Latn''',
'''Belarusian''': '''bel_Cyrl''',
'''Bemba''': '''bem_Latn''',
'''Bengali''': '''ben_Beng''',
'''Bhojpuri''': '''bho_Deva''',
'''Banjar Arabic''': '''bjn_Arab''',
'''Banjar Latin''': '''bjn_Latn''',
'''Standard Tibetan''': '''bod_Tibt''',
'''Bosnian''': '''bos_Latn''',
'''Buginese''': '''bug_Latn''',
'''Bulgarian''': '''bul_Cyrl''',
'''Catalan''': '''cat_Latn''',
'''Cebuano''': '''ceb_Latn''',
'''Czech''': '''ces_Latn''',
'''Chokwe''': '''cjk_Latn''',
'''Central Kurdish''': '''ckb_Arab''',
'''Crimean Tatar''': '''crh_Latn''',
'''Welsh''': '''cym_Latn''',
'''Danish''': '''dan_Latn''',
'''German''': '''deu_Latn''',
'''Southwestern Dinka''': '''dik_Latn''',
'''Dyula''': '''dyu_Latn''',
'''Dzongkha''': '''dzo_Tibt''',
'''Greek''': '''ell_Grek''',
'''English''': '''eng_Latn''',
'''Esperanto''': '''epo_Latn''',
'''Estonian''': '''est_Latn''',
'''Basque''': '''eus_Latn''',
'''Ewe''': '''ewe_Latn''',
'''Faroese''': '''fao_Latn''',
'''Fijian''': '''fij_Latn''',
'''Finnish''': '''fin_Latn''',
'''Fon''': '''fon_Latn''',
'''French''': '''fra_Latn''',
'''Friulian''': '''fur_Latn''',
'''Nigerian Fulfulde''': '''fuv_Latn''',
'''Scottish Gaelic''': '''gla_Latn''',
'''Irish''': '''gle_Latn''',
'''Galician''': '''glg_Latn''',
'''Guarani''': '''grn_Latn''',
'''Gujarati''': '''guj_Gujr''',
'''Haitian Creole''': '''hat_Latn''',
'''Hausa''': '''hau_Latn''',
'''Hebrew''': '''heb_Hebr''',
'''Hindi''': '''hin_Deva''',
'''Chhattisgarhi''': '''hne_Deva''',
'''Croatian''': '''hrv_Latn''',
'''Hungarian''': '''hun_Latn''',
'''Armenian''': '''hye_Armn''',
'''Igbo''': '''ibo_Latn''',
'''Ilocano''': '''ilo_Latn''',
'''Indonesian''': '''ind_Latn''',
'''Icelandic''': '''isl_Latn''',
'''Italian''': '''ita_Latn''',
'''Javanese''': '''jav_Latn''',
'''Japanese''': '''jpn_Jpan''',
'''Kabyle''': '''kab_Latn''',
'''Jingpho''': '''kac_Latn''',
'''Kamba''': '''kam_Latn''',
'''Kannada''': '''kan_Knda''',
'''Kashmiri Arabic''': '''kas_Arab''',
'''Kashmiri Devanagari''': '''kas_Deva''',
'''Georgian''': '''kat_Geor''',
'''Central Kanuri Arabic''': '''knc_Arab''',
'''Central Kanuri Latin''': '''knc_Latn''',
'''Kazakh''': '''kaz_Cyrl''',
'''Kabiyè''': '''kbp_Latn''',
'''Kabuverdianu''': '''kea_Latn''',
'''Khmer''': '''khm_Khmr''',
'''Kikuyu''': '''kik_Latn''',
'''Kinyarwanda''': '''kin_Latn''',
'''Kyrgyz''': '''kir_Cyrl''',
'''Kimbundu''': '''kmb_Latn''',
'''Northern Kurdish''': '''kmr_Latn''',
'''Kikongo''': '''kon_Latn''',
'''Korean''': '''kor_Hang''',
'''Lao''': '''lao_Laoo''',
'''Ligurian''': '''lij_Latn''',
'''Limburgish''': '''lim_Latn''',
'''Lingala''': '''lin_Latn''',
'''Lithuanian''': '''lit_Latn''',
'''Lombard''': '''lmo_Latn''',
'''Latgalian''': '''ltg_Latn''',
'''Luxembourgish''': '''ltz_Latn''',
'''Luba-Kasai''': '''lua_Latn''',
'''Ganda''': '''lug_Latn''',
'''Luo''': '''luo_Latn''',
'''Mizo''': '''lus_Latn''',
'''Standard Latvian''': '''lvs_Latn''',
'''Magahi''': '''mag_Deva''',
'''Maithili''': '''mai_Deva''',
'''Malayalam''': '''mal_Mlym''',
'''Marathi''': '''mar_Deva''',
'''Minangkabau Arabic ''': '''min_Arab''',
'''Minangkabau Latin''': '''min_Latn''',
'''Macedonian''': '''mkd_Cyrl''',
'''Plateau Malagasy''': '''plt_Latn''',
'''Maltese''': '''mlt_Latn''',
'''Meitei Bengali''': '''mni_Beng''',
'''Halh Mongolian''': '''khk_Cyrl''',
'''Mossi''': '''mos_Latn''',
'''Maori''': '''mri_Latn''',
'''Burmese''': '''mya_Mymr''',
'''Dutch''': '''nld_Latn''',
'''Norwegian Nynorsk''': '''nno_Latn''',
'''Norwegian Bokmål''': '''nob_Latn''',
'''Nepali''': '''npi_Deva''',
'''Northern Sotho''': '''nso_Latn''',
'''Nuer''': '''nus_Latn''',
'''Nyanja''': '''nya_Latn''',
'''Occitan''': '''oci_Latn''',
'''West Central Oromo''': '''gaz_Latn''',
'''Odia''': '''ory_Orya''',
'''Pangasinan''': '''pag_Latn''',
'''Eastern Panjabi''': '''pan_Guru''',
'''Papiamento''': '''pap_Latn''',
'''Western Persian''': '''pes_Arab''',
'''Polish''': '''pol_Latn''',
'''Portuguese''': '''por_Latn''',
'''Dari''': '''prs_Arab''',
'''Southern Pashto''': '''pbt_Arab''',
'''Ayacucho Quechua''': '''quy_Latn''',
'''Romanian''': '''ron_Latn''',
'''Rundi''': '''run_Latn''',
'''Russian''': '''rus_Cyrl''',
'''Sango''': '''sag_Latn''',
'''Sanskrit''': '''san_Deva''',
'''Santali''': '''sat_Olck''',
'''Sicilian''': '''scn_Latn''',
'''Shan''': '''shn_Mymr''',
'''Sinhala''': '''sin_Sinh''',
'''Slovak''': '''slk_Latn''',
'''Slovenian''': '''slv_Latn''',
'''Samoan''': '''smo_Latn''',
'''Shona''': '''sna_Latn''',
'''Sindhi''': '''snd_Arab''',
'''Somali''': '''som_Latn''',
'''Southern Sotho''': '''sot_Latn''',
'''Spanish''': '''spa_Latn''',
'''Tosk Albanian''': '''als_Latn''',
'''Sardinian''': '''srd_Latn''',
'''Serbian''': '''srp_Cyrl''',
'''Swati''': '''ssw_Latn''',
'''Sundanese''': '''sun_Latn''',
'''Swedish''': '''swe_Latn''',
'''Swahili''': '''swh_Latn''',
'''Silesian''': '''szl_Latn''',
'''Tamil''': '''tam_Taml''',
'''Tatar''': '''tat_Cyrl''',
'''Telugu''': '''tel_Telu''',
'''Tajik''': '''tgk_Cyrl''',
'''Tagalog''': '''tgl_Latn''',
'''Thai''': '''tha_Thai''',
'''Tigrinya''': '''tir_Ethi''',
'''Tamasheq Latin''': '''taq_Latn''',
'''Tamasheq Tifinagh''': '''taq_Tfng''',
'''Tok Pisin''': '''tpi_Latn''',
'''Tswana''': '''tsn_Latn''',
'''Tsonga''': '''tso_Latn''',
'''Turkmen''': '''tuk_Latn''',
'''Tumbuka''': '''tum_Latn''',
'''Turkish''': '''tur_Latn''',
'''Twi''': '''twi_Latn''',
'''Central Atlas Tamazight''': '''tzm_Tfng''',
'''Uyghur''': '''uig_Arab''',
'''Ukrainian''': '''ukr_Cyrl''',
'''Umbundu''': '''umb_Latn''',
'''Urdu''': '''urd_Arab''',
'''Northern Uzbek''': '''uzn_Latn''',
'''Venetian''': '''vec_Latn''',
'''Vietnamese''': '''vie_Latn''',
'''Waray''': '''war_Latn''',
'''Wolof''': '''wol_Latn''',
'''Xhosa''': '''xho_Latn''',
'''Eastern Yiddish''': '''ydd_Hebr''',
'''Yoruba''': '''yor_Latn''',
'''Yue Chinese''': '''yue_Hant''',
'''Chinese Simplified''': '''zho_Hans''',
'''Chinese Traditional''': '''zho_Hant''',
'''Standard Malay''': '''zsm_Latn''',
'''Zulu''': '''zul_Latn''',
}
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = 'facebook/nllb-200-distilled-600M'
__UpperCamelCase = (
'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
    'be the text to translate, `src_lang`, which should be the language of the text to translate, and `tgt_lang`, '
    'which should be the language of the desired output. Both `src_lang` and `tgt_lang` are written in '
    'plain English, such as \'Romanian\' or \'Albanian\'. It returns the text translated into `tgt_lang`.'
)
__UpperCamelCase = 'translator'
__UpperCamelCase = AutoTokenizer
__UpperCamelCase = AutoModelForSeqaSeqLM
__UpperCamelCase = LANGUAGE_CODES
__UpperCamelCase = ['text', 'text', 'text']
__UpperCamelCase = ['text']
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if src_lang not in self.lang_to_code:
raise ValueError(f"""{src_lang} is not a supported language.""" )
if tgt_lang not in self.lang_to_code:
raise ValueError(f"""{tgt_lang} is not a supported language.""" )
_lowerCAmelCase = self.lang_to_code[src_lang]
_lowerCAmelCase = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
lowerCamelCase , return_tensors="""pt""" , src_lang=lowerCamelCase , tgt_lang=lowerCamelCase )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
return self.model.generate(**lowerCamelCase )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=lowerCamelCase )
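# Minimal usage sketch (added illustration; `TranslationTool` is a
# hypothetical name for the obfuscated class above, which subclasses the
# callable PipelineTool):
#
#   tool = TranslationTool()
#   print(tool("Bonjour, comment allez-vous ?", src_lang="French", tgt_lang="English"))
#
# The plain-English language names are looked up in LANGUAGE_CODES and mapped
# to NLLB codes such as "fra_Latn" before reaching the tokenizer.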
| 317
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_roc_bert''': ['''ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoCBertConfig'''],
'''tokenization_roc_bert''': ['''RoCBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_roc_bert'''] = [
'''ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoCBertForCausalLM''',
'''RoCBertForMaskedLM''',
'''RoCBertForMultipleChoice''',
'''RoCBertForPreTraining''',
'''RoCBertForQuestionAnswering''',
'''RoCBertForSequenceClassification''',
'''RoCBertForTokenClassification''',
'''RoCBertLayer''',
'''RoCBertModel''',
'''RoCBertPreTrainedModel''',
'''load_tf_weights_in_roc_bert''',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
SCREAMING_SNAKE_CASE : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
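# Note (added): this is the standard lazy-import pattern -- in the upstream
# files the `_LazyModule` instance replaces the module in `sys.modules`, so
# the heavy submodules listed in `_import_structure` are imported only when
# one of their attributes is first accessed, while the TYPE_CHECKING branch
# still gives static type checkers eager imports to resolve.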
| 317
|
"""simple docstring"""
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all prime numbers below max_number, via a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            # Mark every multiple of i from i**2 upwards as composite.
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """Count the semiprimes (products of exactly two primes) below max_number."""
    prime_numbers = calculate_prime_numbers(max_number // 2)
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        # Shrink the window until the product falls below the limit; every
        # prime from left to right then pairs with prime_numbers[left].
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count
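# Worked sanity check on a small limit (added; not part of the original
# solution): the semiprimes below 30 are 4, 6, 9, 10, 14, 15, 21, 22, 25
# and 26 -- ten numbers in total.
assert solution(30) == 10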
if __name__ == "__main__":
print(F'{solution() = }')
| 317
| 1
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __lowerCamelCase ( metaclass=__lowercase ):
__UpperCamelCase = ['torch']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class __lowerCamelCase ( metaclass=__lowercase ):
__UpperCamelCase = ['torch']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class __lowerCamelCase ( metaclass=__lowercase ):
__UpperCamelCase = ['torch']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class __lowerCamelCase ( metaclass=__lowercase ):
__UpperCamelCase = ['torch']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class __lowerCamelCase ( metaclass=__lowercase ):
__UpperCamelCase = ['torch']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class __lowerCamelCase ( metaclass=__lowercase ):
__UpperCamelCase = ['torch']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class __lowerCamelCase ( metaclass=__lowercase ):
__UpperCamelCase = ['torch']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class __lowerCamelCase ( metaclass=__lowercase ):
__UpperCamelCase = ['torch']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class __lowerCamelCase ( metaclass=__lowercase ):
__UpperCamelCase = ['torch']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class __lowerCamelCase ( metaclass=__lowercase ):
__UpperCamelCase = ['torch']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class __lowerCamelCase ( metaclass=__lowercase ):
__UpperCamelCase = ['torch']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
def __UpperCAmelCase ( *snake_case_ : Dict , **snake_case_ : int ) -> Dict:
"""simple docstring"""
requires_backends(snake_case_ , ["""torch"""] )
def __UpperCAmelCase ( *snake_case_ : Union[str, Any] , **snake_case_ : str ) -> List[str]:
"""simple docstring"""
requires_backends(snake_case_ , ["""torch"""] )
def __UpperCAmelCase ( *snake_case_ : List[str] , **snake_case_ : int ) -> List[str]:
"""simple docstring"""
requires_backends(snake_case_ , ["""torch"""] )
def __UpperCAmelCase ( *snake_case_ : List[Any] , **snake_case_ : List[Any] ) -> int:
"""simple docstring"""
requires_backends(snake_case_ , ["""torch"""] )
def __UpperCAmelCase ( *snake_case_ : List[Any] , **snake_case_ : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
requires_backends(snake_case_ , ["""torch"""] )
def __UpperCAmelCase ( *snake_case_ : Optional[Any] , **snake_case_ : List[str] ) -> Optional[Any]:
"""simple docstring"""
requires_backends(snake_case_ , ["""torch"""] )
def __UpperCAmelCase ( *snake_case_ : List[Any] , **snake_case_ : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(snake_case_ , ["""torch"""] )
class __lowerCamelCase ( metaclass=__lowercase ):
__UpperCamelCase = ['torch']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class __lowerCamelCase ( metaclass=__lowercase ):
__UpperCamelCase = ['torch']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class __lowerCamelCase ( metaclass=__lowercase ):
__UpperCamelCase = ['torch']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class __lowerCamelCase ( metaclass=__lowercase ):
__UpperCamelCase = ['torch']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class __lowerCamelCase ( metaclass=__lowercase ):
__UpperCamelCase = ['torch']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class __lowerCamelCase ( metaclass=__lowercase ):
__UpperCamelCase = ['torch']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class __lowerCamelCase ( metaclass=__lowercase ):
__UpperCamelCase = ['torch']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class __lowerCamelCase ( metaclass=__lowercase ):
__UpperCamelCase = ['torch']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class __lowerCamelCase ( metaclass=__lowercase ):
__UpperCamelCase = ['torch']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class __lowerCamelCase ( metaclass=__lowercase ):
__UpperCamelCase = ['torch']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class __lowerCamelCase ( metaclass=__lowercase ):
__UpperCamelCase = ['torch']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class __lowerCamelCase ( metaclass=__lowercase ):
__UpperCamelCase = ['torch']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class __lowerCamelCase ( metaclass=__lowercase ):
__UpperCamelCase = ['torch']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class __lowerCamelCase ( metaclass=__lowercase ):
__UpperCamelCase = ['torch']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class __lowerCamelCase ( metaclass=__lowercase ):
__UpperCamelCase = ['torch']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class __lowerCamelCase ( metaclass=__lowercase ):
__UpperCamelCase = ['torch']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class __lowerCamelCase ( metaclass=__lowercase ):
__UpperCamelCase = ['torch']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class __lowerCamelCase ( metaclass=__lowercase ):
__UpperCamelCase = ['torch']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class __lowerCamelCase ( metaclass=__lowercase ):
__UpperCamelCase = ['torch']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class __lowerCamelCase ( metaclass=__lowercase ):
__UpperCamelCase = ['torch']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class __lowerCamelCase ( metaclass=__lowercase ):
__UpperCamelCase = ['torch']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class __lowerCamelCase ( metaclass=__lowercase ):
__UpperCamelCase = ['torch']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class __lowerCamelCase ( metaclass=__lowercase ):
__UpperCamelCase = ['torch']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class __lowerCamelCase ( metaclass=__lowercase ):
__UpperCamelCase = ['torch']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class __lowerCamelCase ( metaclass=__lowercase ):
__UpperCamelCase = ['torch']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class __lowerCamelCase ( metaclass=__lowercase ):
__UpperCamelCase = ['torch']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class __lowerCamelCase ( metaclass=__lowercase ):
__UpperCamelCase = ['torch']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class __lowerCamelCase ( metaclass=__lowercase ):
__UpperCamelCase = ['torch']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class __lowerCamelCase ( metaclass=__lowercase ):
__UpperCamelCase = ['torch']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class __lowerCamelCase ( metaclass=__lowercase ):
__UpperCamelCase = ['torch']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class __lowerCamelCase ( metaclass=__lowercase ):
__UpperCamelCase = ['torch']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class __lowerCamelCase ( metaclass=__lowercase ):
__UpperCamelCase = ['torch']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class __lowerCamelCase ( metaclass=__lowercase ):
__UpperCamelCase = ['torch']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class __lowerCamelCase ( metaclass=__lowercase ):
__UpperCamelCase = ['torch']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class __lowerCamelCase ( metaclass=__lowercase ):
__UpperCamelCase = ['torch']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class __lowerCamelCase ( metaclass=__lowercase ):
__UpperCamelCase = ['torch']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class __lowerCamelCase ( metaclass=__lowercase ):
__UpperCamelCase = ['torch']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class __lowerCamelCase ( metaclass=__lowercase ):
__UpperCamelCase = ['torch']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class __lowerCamelCase ( metaclass=__lowercase ):
__UpperCamelCase = ['torch']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def A__ (cls , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
| 317
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = (
    'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image. '
    'It takes two arguments named `image`, which should be the original image, and `label`, which should be a text '
    'describing the elements that should be identified in the segmentation mask. The tool returns the mask.'
)
__UpperCamelCase = 'CIDAS/clipseg-rd64-refined'
__UpperCamelCase = 'image_segmenter'
__UpperCamelCase = CLIPSegForImageSegmentation
__UpperCamelCase = ['image', 'text']
__UpperCamelCase = ['image']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""vision"""] )
super().__init__(*lowerCamelCase , **lowerCamelCase )
def A__ (self , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
return self.pre_processor(text=[label] , images=[image] , padding=lowerCamelCase , return_tensors="""pt""" )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
with torch.no_grad():
_lowerCAmelCase = self.model(**lowerCamelCase ).logits
return logits
def A__ (self , lowerCamelCase ):
'''simple docstring'''
        array = outputs.cpu().detach().numpy()
        # Threshold the logits into a binary mask: background 0, foreground 1.
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
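# Minimal usage sketch (added illustration; `ImageSegmentationTool` is a
# hypothetical name for the obfuscated class above). The tool is callable and
# returns a binary PIL mask image:
#
#   from PIL import Image
#   segmenter = ImageSegmentationTool()
#   mask = segmenter(image=Image.open("photo.jpg"), label="cat")
#   mask.save("cat_mask.png")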
| 317
| 1
|
"""simple docstring"""
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
def __UpperCAmelCase ( snake_case_ : Tuple , snake_case_ : Dict ) -> Any:
"""simple docstring"""
_lowerCAmelCase = set()
_lowerCAmelCase = []
def parse_line(snake_case_ : int ):
for line in fp:
if isinstance(snake_case_ , snake_case_ ):
_lowerCAmelCase = line.decode("""UTF-8""" )
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(""" """ ):
# process a single warning and move it to `selected_warnings`.
if len(snake_case_ ) > 0:
_lowerCAmelCase = """\n""".join(snake_case_ )
# Only keep the warnings specified in `targets`
if any(F""": {x}: """ in warning for x in targets ):
selected_warnings.add(snake_case_ )
buffer.clear()
continue
else:
_lowerCAmelCase = line.strip()
buffer.append(snake_case_ )
if from_gh:
for filename in os.listdir(snake_case_ ):
_lowerCAmelCase = os.path.join(snake_case_ , snake_case_ )
if not os.path.isdir(snake_case_ ):
# read the file
if filename != "warnings.txt":
continue
with open(snake_case_ ) as fp:
parse_line(snake_case_ )
else:
try:
with zipfile.ZipFile(snake_case_ ) as z:
for filename in z.namelist():
if not os.path.isdir(snake_case_ ):
# read the file
if filename != "warnings.txt":
continue
with z.open(snake_case_ ) as fp:
parse_line(snake_case_ )
except Exception:
logger.warning(
F"""{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.""" )
return selected_warnings
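# Illustrative target line (added): inside a pytest warnings summary, an entry
# such as
#   src/transformers/foo.py:87: DeprecationWarning: `foo` is deprecated
# contains ": DeprecationWarning: ", so the f-string check above keeps it when
# "DeprecationWarning" is among the requested targets.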
def __UpperCAmelCase ( snake_case_ : int , snake_case_ : Optional[Any] ) -> int:
"""simple docstring"""
_lowerCAmelCase = set()
_lowerCAmelCase = [os.path.join(snake_case_ , snake_case_ ) for p in os.listdir(snake_case_ ) if (p.endswith(""".zip""" ) or from_gh)]
for p in paths:
selected_warnings.update(extract_warnings_from_single_artifact(snake_case_ , snake_case_ ) )
return selected_warnings
if __name__ == "__main__":
def __UpperCAmelCase ( snake_case_ : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return values.split(""",""" )
SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
# optional parameters
parser.add_argument(
'''--targets''',
default='''DeprecationWarning,UserWarning,FutureWarning''',
type=list_str,
help='''Comma-separated list of target warning(s) which we want to extract.''',
)
parser.add_argument(
'''--from_gh''',
action='''store_true''',
help='''If running from a GitHub action workflow and collecting warnings from its artifacts.''',
)
SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args()
SCREAMING_SNAKE_CASE : Dict = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
SCREAMING_SNAKE_CASE : Tuple = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('''=''' * 8_0)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
SCREAMING_SNAKE_CASE : Optional[Any] = extract_warnings(args.output_dir, args.targets)
SCREAMING_SNAKE_CASE : int = sorted(selected_warnings)
with open(os.path.join(args.output_dir, '''selected_warnings.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 317
|
"""simple docstring"""
from __future__ import annotations
import queue
class TreeNode:
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None


def build_tree() -> TreeNode:
    """Interactively build a binary tree in level order; entering 'n' stops a branch."""
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise RuntimeError("unreachable: every iteration either returns or refills the queue")
def pre_order(node: TreeNode) -> None:
    """Print a pre-order traversal (root, left, right) of the tree."""
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    """Print an in-order traversal (left, root, right) of the tree."""
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    """Print a post-order traversal (left, right, root) of the tree."""
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")
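# Worked example (added): for the three-node tree with root 1, left child 2
# and right child 3, the traversals above print
#   pre_order:  1,2,3,
#   in_order:   2,1,3,
#   post_order: 2,3,1,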
def level_order(node: TreeNode) -> None:
    """Print a breadth-first (level order) traversal of the tree."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    """Print a level order traversal with one line of output per tree level."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():  # drain exactly one level of the tree
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)
def pre_order_iter(node: TreeNode) -> None:
    """Print a pre-order traversal without recursion, using a stack."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    """Print an in-order traversal without recursion, using a stack."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # descend to the leftmost node, stacking the ancestors
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    """Print a post-order traversal without recursion, using two stacks."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")
def prompt(s: str = "", width: int = 50, char: str = "*") -> str:
    """Return s centered in a banner line of the given width and fill char."""
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('''Binary Tree Traversals'''))
    node: TreeNode = build_tree()
print(prompt('''Pre Order Traversal'''))
pre_order(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal'''))
in_order(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal'''))
post_order(node)
print(prompt() + '''\n''')
print(prompt('''Level Order Traversal'''))
level_order(node)
print(prompt() + '''\n''')
print(prompt('''Actual Level Order Traversal'''))
level_order_actual(node)
print('''*''' * 5_0 + '''\n''')
print(prompt('''Pre Order Traversal - Iteration Version'''))
pre_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal - Iteration Version'''))
in_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal - Iteration Version'''))
post_order_iter(node)
print(prompt())
| 317
| 1
|
"""simple docstring"""
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
SCREAMING_SNAKE_CASE : str = '''src/transformers'''
SCREAMING_SNAKE_CASE : List[str] = '''docs/source/en/tasks'''
def __UpperCAmelCase ( snake_case_ : int , snake_case_ : Union[str, Any] , snake_case_ : Any ) -> Union[str, Any]:
"""simple docstring"""
with open(snake_case_ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
_lowerCAmelCase = f.readlines()
# Find the start prompt.
_lowerCAmelCase = 0
while not lines[start_index].startswith(snake_case_ ):
start_index += 1
start_index += 1
_lowerCAmelCase = start_index
while not lines[end_index].startswith(snake_case_ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
SCREAMING_SNAKE_CASE : List[str] = direct_transformers_import(TRANSFORMERS_PATH)
SCREAMING_SNAKE_CASE : List[Any] = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SCREAMING_SNAKE_CASE : Dict = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def __UpperCAmelCase ( snake_case_ : Tuple ) -> List[str]:
"""simple docstring"""
_lowerCAmelCase = TASK_GUIDE_TO_MODELS[task_guide]
_lowerCAmelCase = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(snake_case_ , set() )
_lowerCAmelCase = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([F"""[{name}](../model_doc/{code})""" for code, name in model_names.items()] ) + "\n"
def __UpperCAmelCase ( snake_case_ : List[Any] , snake_case_ : str=False ) -> List[str]:
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = _find_text_in_file(
filename=os.path.join(snake_case_ , snake_case_ ) , start_prompt="""<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->""" , end_prompt="""<!--End of the generated tip-->""" , )
_lowerCAmelCase = get_model_list_for_task(snake_case_ )
if current_list != new_list:
if overwrite:
with open(os.path.join(snake_case_ , snake_case_ ) , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
F"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"""
""" to fix this.""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
SCREAMING_SNAKE_CASE : str = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 317
|
"""simple docstring"""
from __future__ import annotations
class BoyerMooreSearch:
    """Bad-character-heuristic string search (the simple O(n * m) variant)."""

    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Return the rightmost index of char inside the pattern, or -1."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Return the text index of the rightmost mismatch when the pattern is
        aligned at current_pos, or -1 if the pattern matches there."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        """Return every position at which the pattern occurs in the text."""
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)
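# Expected output for the demo above (added note): "AB" occurs at indices 0
# and 3 of "ABAABA", so the script prints
#   Pattern found in following positions:
#   [0, 3]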
| 317
| 1
|
"""simple docstring"""
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class __lowerCamelCase ( unittest.TestCase ):
def A__ (self , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
return f"""gaussian_noise_s={seed}_shape={"_".join([str(lowerCamelCase ) for s in shape] )}.npy"""
def A__ (self ):
'''simple docstring'''
super().tearDown()
gc.collect()
def A__ (self , lowerCamelCase=0 , lowerCamelCase=(4, 4, 64, 64) , lowerCamelCase=False ):
'''simple docstring'''
_lowerCAmelCase = jnp.bfloataa if fpaa else jnp.floataa
_lowerCAmelCase = jnp.array(load_hf_numpy(self.get_file_format(lowerCamelCase , lowerCamelCase ) ) , dtype=lowerCamelCase )
return image
def A__ (self , lowerCamelCase=False , lowerCamelCase="CompVis/stable-diffusion-v1-4" ):
'''simple docstring'''
_lowerCAmelCase = jnp.bfloataa if fpaa else jnp.floataa
_lowerCAmelCase = """bf16""" if fpaa else None
_lowerCAmelCase , _lowerCAmelCase = FlaxUNetaDConditionModel.from_pretrained(
lowerCamelCase , subfolder="""unet""" , dtype=lowerCamelCase , revision=lowerCamelCase )
return model, params
def A__ (self , lowerCamelCase=0 , lowerCamelCase=(4, 77, 768) , lowerCamelCase=False ):
'''simple docstring'''
_lowerCAmelCase = jnp.bfloataa if fpaa else jnp.floataa
_lowerCAmelCase = jnp.array(load_hf_numpy(self.get_file_format(lowerCamelCase , lowerCamelCase ) ) , dtype=lowerCamelCase )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1_000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase = self.get_unet_model(model_id="""CompVis/stable-diffusion-v1-4""" , fpaa=lowerCamelCase )
_lowerCAmelCase = self.get_latents(lowerCamelCase , fpaa=lowerCamelCase )
_lowerCAmelCase = self.get_encoder_hidden_states(lowerCamelCase , fpaa=lowerCamelCase )
_lowerCAmelCase = model.apply(
{"""params""": params} , lowerCamelCase , jnp.array(lowerCamelCase , dtype=jnp.intaa ) , encoder_hidden_states=lowerCamelCase , ).sample
assert sample.shape == latents.shape
_lowerCAmelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
_lowerCAmelCase = jnp.array(lowerCamelCase , dtype=jnp.floataa )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowerCamelCase , lowerCamelCase , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1_000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase = self.get_unet_model(model_id="""stabilityai/stable-diffusion-2""" , fpaa=lowerCamelCase )
_lowerCAmelCase = self.get_latents(lowerCamelCase , shape=(4, 4, 96, 96) , fpaa=lowerCamelCase )
_lowerCAmelCase = self.get_encoder_hidden_states(lowerCamelCase , shape=(4, 77, 1_024) , fpaa=lowerCamelCase )
_lowerCAmelCase = model.apply(
{"""params""": params} , lowerCamelCase , jnp.array(lowerCamelCase , dtype=jnp.intaa ) , encoder_hidden_states=lowerCamelCase , ).sample
assert sample.shape == latents.shape
_lowerCAmelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
_lowerCAmelCase = jnp.array(lowerCamelCase , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowerCamelCase , lowerCamelCase , atol=1e-2 )
| 317
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
SCREAMING_SNAKE_CASE : List[str] = False
class __lowerCamelCase ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = VersatileDiffusionImageVariationPipeline.from_pretrained("""shi-labs/versatile-diffusion""" )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
_lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
_lowerCAmelCase = torch.manual_seed(0 )
_lowerCAmelCase = pipe(
image=lowerCamelCase , generator=lowerCamelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
_lowerCAmelCase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 317
| 1
|
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser):
    """Register the shared diffusers reporting options on the pytest CLI."""
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    """Write the detailed test report files when --make-reports is passed."""
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 317
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __lowerCamelCase ( __lowercase , unittest.TestCase ):
__UpperCamelCase = DiTPipeline
__UpperCamelCase = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
__UpperCamelCase = PipelineTesterMixin.required_optional_params - {
'latents',
'num_images_per_prompt',
'callback',
'callback_steps',
}
__UpperCamelCase = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
__UpperCamelCase = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=1_000,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images
        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-1
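# A minimal standalone usage sketch of the pipeline exercised above (assumes a CUDA
# device and network access to download the "facebook/DiT-XL-2-256" checkpoint):
#
#     import torch
#     from diffusers import DiTPipeline
#
#     pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
#     pipe.to("cuda")
#     class_ids = pipe.get_label_ids(["white shark"])
#     image = pipe(class_labels=class_ids, num_inference_steps=25).images[0]
#     image.save("white_shark.png")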
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/config.json''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/config.json''',
'''funnel-transformer/medium-base''': '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json''',
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/config.json''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json''',
'''funnel-transformer/xlarge-base''': '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json''',
}
class FunnelConfig(PretrainedConfig):
    model_type = "funnel"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }
    def __init__(
        self,
        vocab_size=30_522,
        block_sizes=[4, 4, 4],
        block_repeats=None,
        num_decoder_layers=2,
        d_model=768,
        n_head=12,
        d_head=64,
        d_inner=3_072,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        initializer_range=0.1,
        initializer_std=None,
        layer_norm_eps=1e-9,
        pooling_type="mean",
        attention_type="relative_shift",
        separate_cls=True,
        truncate_seq=True,
        pool_q_only=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        assert len(block_sizes) == len(
            self.block_repeats
        ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only

        super().__init__(**kwargs)

    @property
    def num_hidden_layers(self):
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`."
        )

    @property
    def num_blocks(self):
        return len(self.block_sizes)

    @num_blocks.setter
    def num_blocks(self, value):
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.")
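# Example (a sketch): the read-only properties derive everything from `block_sizes`.
#
#     config = FunnelConfig(block_sizes=[4, 4, 4])
#     config.num_blocks         # 3
#     config.num_hidden_layers  # 12 == sum(config.block_sizes)
#     config.num_hidden_layers = 6  # raises NotImplementedError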
"""simple docstring"""
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e


_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]


@pytest.mark.parametrize(
    "operations",
    (
        pytest.param(_add_items, id="add items"),
        pytest.param(_overwrite_items, id="overwrite items"),
        pytest.param(_delete_items, id="delete items"),
        pytest.param(_access_absent_items, id="access absent items"),
        pytest.param(_add_with_resize_up, id="add with resize up"),
        pytest.param(_add_with_resize_down, id="add with resize down"),
    ),
)
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())


def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
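# The parametrized test above is a differential test: each operation sequence is
# replayed against a plain `dict` as the reference oracle. A minimal sketch of
# the same idea outside pytest (hypothetical driver):
#
#     my, py = HashMap(initial_block_size=4), {}
#     for fun, *args in _add_items:
#         _run_operation(my, fun, *args)
#         _run_operation(py, fun, *args)
#     assert set(my.items()) == set(py.items())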
"""simple docstring"""
def manhattan_distance(point_a: list, point_b: list) -> float:
    """
    Expects two lists of numbers representing two points in the same
    n-dimensional space.

    >>> manhattan_distance([1, 1], [2, 2])
    2.0
    >>> manhattan_distance([1.5, 1.5], [2, 2])
    1.0
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list[float]) -> None:
    """
    >>> _validate_point(None)
    Traceback (most recent call last):
        ...
    ValueError: Missing an input
    """
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    """
    Version with a one-line summation.

    >>> manhattan_distance_one_liner([1, 1], [2, 2])
    2.0
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
if __name__ == "__main__":
import doctest
doctest.testmod()
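# Quick check (both implementations agree in any dimension):
#
#     manhattan_distance([1, 1], [2, 2])                  # 2.0
#     manhattan_distance_one_liner([1, 3, 7], [2, 4, 4])  # 5.0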
"""simple docstring"""
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """Naive recursion: counts ordered combinations of items in `array` summing to `target`."""

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """Top-down recursion memoized in `dp_array`."""

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Iterative bottom-up dynamic programming."""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    array = [1, 2, 5]
    target = 5
    print(combination_sum_iv(n, array, target))
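# Worked example of the recurrence dp[t] = sum(dp[t - a] for a in array):
# with array = [1, 2, 5] and target = 5, dp evolves as [1, 1, 2, 3, 5, 9],
# so there are 9 ordered combinations. All three implementations agree:
#
#     assert combination_sum_iv(3, [1, 2, 5], 5) == 9
#     assert combination_sum_iv_dp_array(3, [1, 2, 5], 5) == 9
#     assert combination_sum_iv_bottom_up(3, [1, 2, 5], 5) == 9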
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = ''' def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
'''
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = """src/transformers"""
shutil.rmtree(self.transformer_dir )
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None ):
'''simple docstring'''
_lowerCAmelCase = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
_lowerCAmelCase = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
_lowerCAmelCase = black.Mode(target_versions={black.TargetVersion.PY35} , line_length=119 )
_lowerCAmelCase = black.format_str(lowerCamelCase , mode=lowerCamelCase )
_lowerCAmelCase = os.path.join(self.transformer_dir , """new_code.py""" )
with open(lowerCamelCase , """w""" , newline="""\n""" ) as f:
f.write(lowerCamelCase )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(lowerCamelCase ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=lowerCamelCase )
with open(lowerCamelCase , """r""" ) as f:
self.assertTrue(f.read() , lowerCamelCase )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = check_copies.find_code_in_transformers("""models.bert.modeling_bert.BertLMPredictionHead""" )
self.assertEqual(lowerCamelCase , lowerCamelCase )
def A__ (self ):
'''simple docstring'''
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , REFERENCE_CODE + """\n""" , )
# With no empty line at the end
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , lowerCamelCase , )
# Copy consistency with rename
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , re.sub("""Bert""" , """TestModel""" , lowerCamelCase ) , )
# Copy consistency with a really long name
_lowerCAmelCase = """TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
f"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}""" , f"""{long_class_name}LMPredictionHead""" , re.sub("""Bert""" , lowerCamelCase , lowerCamelCase ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , lowerCamelCase , overwrite_result=re.sub("""Bert""" , """TestModel""" , lowerCamelCase ) , )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = check_copies.LOCALIZED_READMES["""README_zh-hans.md"""]
_lowerCAmelCase = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
_lowerCAmelCase = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
_lowerCAmelCase = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
_lowerCAmelCase , _lowerCAmelCase = check_copies.convert_to_localized_md(
lowerCamelCase , lowerCamelCase , localized_readme["""format_model_list"""] )
self.assertFalse(lowerCamelCase )
self.assertEqual(lowerCamelCase , lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase = check_copies.convert_to_localized_md(
lowerCamelCase , lowerCamelCase , localized_readme["""format_model_list"""] )
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(lowerCamelCase )
_lowerCAmelCase = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
_lowerCAmelCase = (
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
_lowerCAmelCase = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
_lowerCAmelCase , _lowerCAmelCase = check_copies.convert_to_localized_md(
lowerCamelCase , lowerCamelCase , localized_readme["""format_model_list"""] )
# Check if the model link is synchronized.
self.assertEqual(lowerCamelCase , lowerCamelCase )
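# The convention under test: a block annotated with
# `# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel`
# must stay identical to its source after the rename is applied; running
# `python utils/check_copies.py --fix_and_overwrite` re-synchronizes drifted copies.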
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    decoded = ""
    keychar: int
    cipherchar: int
    decodedchar: int

    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)

    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")
    ciphertext = [int(number) for number in data.strip().split(",")]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
print(F'{solution() = }')
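# Sanity check of the XOR round trip behind `try_key` (a demo, not part of the
# original solution): XOR with the same key is its own inverse.
#
#     plain = "the quick brown fox"
#     key = tuple(map(ord, "god"))
#     cipher = [c ^ k for c, k in zip(map(ord, plain), cycle(key))]
#     assert try_key(cipher, key) == plain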
"""simple docstring"""
import pprint
import requests
API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
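# Note: each element of the returned list is a quote object; at the time of
# writing the ZenQuotes schema uses the keys "q" (quote), "a" (author) and
# "h" (pre-rendered HTML), e.g. [{"q": "...", "a": "...", "h": "..."}] —
# verify against the current API docs before relying on the schema.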
"""simple docstring"""
def solution(limit: int = 1000000) -> int:
    """Counts the n below `limit` for which x² - y² - z² == n has exactly ten
    solutions, where x, y, z form a decreasing arithmetic progression."""
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d < a
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
if __name__ == "__main__":
print(F'{solution() = }')
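# Where `common_difference % 4` comes from: with x = a + d, y = a, z = a - d,
# the quantity x² - y² - z² simplifies to a(4d - a) = n, so 4d = n/a + a must be
# an integer multiple of 4. A small brute-force cross-check (demo only):
#
#     def brute_force_count(n: int) -> int:
#         return sum(
#             1
#             for a in range(1, n + 1)
#             if n % a == 0 and (n // a + a) % 4 == 0 and 0 < (n // a + a) // 4 < a
#         )
#
#     assert brute_force_count(1155) == 10  # 1155 is the least n with exactly ten solutions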
"""simple docstring"""
def find_min(arr: list[int]) -> int:
    """Partitions `arr` into two subsets whose sums are as close as possible
    and returns the minimum difference."""
    n = len(arr)
    s = sum(arr)

    # dp[i][j] is True when some subset of the first i items sums to exactly j
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]

    for i in range(n + 1):
        dp[i][0] = True

    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]  # exclude arr[i - 1]

            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]  # or include it

    # the best split puts as close to half of the total as possible on one side
    diff = s
    for j in range(s // 2, -1, -1):
        if dp[n][j]:
            diff = s - 2 * j
            break

    return diff
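# Example: find_min([1, 6, 11, 5]) returns 1 — the best split is {1, 5, 6} vs
# {11}, giving |12 - 11| = 1.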
"""simple docstring"""
from functools import reduce
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def solution(n: str = N) -> int:
    """Finds the greatest product of thirteen adjacent digits in the 1000-digit number N."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
if __name__ == "__main__":
print(F'{solution() = }')
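# An equivalent formulation without `reduce` (a sketch using math.prod):
#
#     from math import prod
#
#     def solution_prod(n: str = N, window: int = 13) -> int:
#         return max(prod(int(d) for d in n[i : i + window]) for i in range(len(n) - window + 1))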
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
class FlaxBlenderbotSmallModelTester:
def __init__(self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=99 , lowerCamelCase=16 , lowerCamelCase=2 , lowerCamelCase=4 , lowerCamelCase=4 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=32 , lowerCamelCase=2 , lowerCamelCase=1 , lowerCamelCase=0 , lowerCamelCase=0.02 , ):
'''simple docstring'''
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = seq_length
_lowerCAmelCase = is_training
_lowerCAmelCase = use_labels
_lowerCAmelCase = vocab_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_act
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = eos_token_id
_lowerCAmelCase = pad_token_id
_lowerCAmelCase = bos_token_id
_lowerCAmelCase = initializer_range
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
_lowerCAmelCase = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
_lowerCAmelCase = shift_tokens_right(lowerCamelCase , 1 , 2 )
_lowerCAmelCase = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=lowerCamelCase , )
_lowerCAmelCase = prepare_blenderbot_inputs_dict(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return config, inputs_dict
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase = self.prepare_config_and_inputs()
return config, inputs_dict
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = 20
_lowerCAmelCase = model_class_name(lowerCamelCase )
_lowerCAmelCase = model.encode(inputs_dict["""input_ids"""] )
_lowerCAmelCase , _lowerCAmelCase = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
_lowerCAmelCase = model.init_cache(decoder_input_ids.shape[0] , lowerCamelCase , lowerCamelCase )
_lowerCAmelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
_lowerCAmelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_lowerCAmelCase = model.decode(
decoder_input_ids[:, :-1] , lowerCamelCase , decoder_attention_mask=lowerCamelCase , past_key_values=lowerCamelCase , decoder_position_ids=lowerCamelCase , )
_lowerCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
_lowerCAmelCase = model.decode(
decoder_input_ids[:, -1:] , lowerCamelCase , decoder_attention_mask=lowerCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowerCamelCase , )
_lowerCAmelCase = model.decode(lowerCamelCase , lowerCamelCase )
_lowerCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = 20
_lowerCAmelCase = model_class_name(lowerCamelCase )
_lowerCAmelCase = model.encode(inputs_dict["""input_ids"""] )
_lowerCAmelCase , _lowerCAmelCase = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
_lowerCAmelCase = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
_lowerCAmelCase = model.init_cache(decoder_input_ids.shape[0] , lowerCamelCase , lowerCamelCase )
_lowerCAmelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_lowerCAmelCase = model.decode(
decoder_input_ids[:, :-1] , lowerCamelCase , decoder_attention_mask=lowerCamelCase , past_key_values=lowerCamelCase , decoder_position_ids=lowerCamelCase , )
_lowerCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
_lowerCAmelCase = model.decode(
decoder_input_ids[:, -1:] , lowerCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowerCamelCase , decoder_position_ids=lowerCamelCase , )
_lowerCAmelCase = model.decode(lowerCamelCase , lowerCamelCase , decoder_attention_mask=lowerCamelCase )
_lowerCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
_lowerCAmelCase = input_ids.shape[0]
_lowerCAmelCase = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = self._get_config_and_data()
_lowerCAmelCase = FlaxBlenderbotSmallForConditionalGeneration(lowerCamelCase )
_lowerCAmelCase = lm_model(input_ids=lowerCamelCase )
_lowerCAmelCase = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["""logits"""].shape , lowerCamelCase )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
_lowerCAmelCase = FlaxBlenderbotSmallForConditionalGeneration(lowerCamelCase )
_lowerCAmelCase = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
_lowerCAmelCase = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
_lowerCAmelCase = lm_model(input_ids=lowerCamelCase , decoder_input_ids=lowerCamelCase )
_lowerCAmelCase = (*summary.shape, config.vocab_size)
self.assertEqual(outputs["""logits"""].shape , lowerCamelCase )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
_lowerCAmelCase = shift_tokens_right(lowerCamelCase , 1 , 2 )
_lowerCAmelCase = np.equal(lowerCamelCase , 1 ).astype(np.floataa ).sum()
_lowerCAmelCase = np.equal(lowerCamelCase , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(lowerCamelCase , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class FlaxBlenderbotSmallModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxBlenderbotSmallModelTester(self)
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowerCAmelCase = self._prepare_for_class(lowerCamelCase , lowerCamelCase )
_lowerCAmelCase = model_class(lowerCamelCase )
@jax.jit
def encode_jitted(lowerCamelCase , lowerCamelCase=None , **lowerCamelCase ):
return model.encode(input_ids=lowerCamelCase , attention_mask=lowerCamelCase )
with self.subTest("""JIT Enabled""" ):
_lowerCAmelCase = encode_jitted(**lowerCamelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
_lowerCAmelCase = encode_jitted(**lowerCamelCase ).to_tuple()
self.assertEqual(len(lowerCamelCase ) , len(lowerCamelCase ) )
for jitted_output, output in zip(lowerCamelCase , lowerCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowerCAmelCase = model_class(lowerCamelCase )
_lowerCAmelCase = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] )
_lowerCAmelCase = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(lowerCamelCase , lowerCamelCase , lowerCamelCase ):
return model.decode(
decoder_input_ids=lowerCamelCase , decoder_attention_mask=lowerCamelCase , encoder_outputs=lowerCamelCase , )
with self.subTest("""JIT Enabled""" ):
_lowerCAmelCase = decode_jitted(**lowerCamelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
_lowerCAmelCase = decode_jitted(**lowerCamelCase ).to_tuple()
self.assertEqual(len(lowerCamelCase ) , len(lowerCamelCase ) )
for jitted_output, output in zip(lowerCamelCase , lowerCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def A__ (self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
_lowerCAmelCase = model_class_name.from_pretrained("""facebook/blenderbot_small-90M""" )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
_lowerCAmelCase = np.ones((1, 1) ) * model.config.eos_token_id
_lowerCAmelCase = model(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
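# The `shift_tokens_right` behaviour exercised above, sketched in plain numpy
# (an illustration of the usual seq2seq convention, not the library function):
#
#     def shift_right(labels, pad_token_id, decoder_start_token_id):
#         shifted = np.zeros_like(labels)
#         shifted[:, 1:] = labels[:, :-1]
#         shifted[:, 0] = decoder_start_token_id
#         return np.where(shifted == -100, pad_token_id, shifted)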
"""simple docstring"""
def solution(n: int = 600851475143) -> int:
    """Returns the largest prime factor of n."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")

    prime = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            prime = i
            n //= i
        i += 1
    if n > 1:
        prime = n
    return int(prime)
if __name__ == "__main__":
print(F'{solution() = }')
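# Trial division up to sqrt(n) suffices: once every factor i with i * i <= n has
# been divided out, any remainder greater than 1 is itself prime and is the
# largest prime factor.
#
#     assert solution(13195) == 29           # 13195 = 5 * 7 * 13 * 29
#     assert solution(600851475143) == 6857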
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
    AutoModelForSeq2SeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1_024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )
def handle_metrics(split, metrics, output_dir):
    """Logs the metrics for one split and saves them as JSON."""
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("""Training/evaluation parameters %s""" , snake_case_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )
# use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(
        tokenizer, (MBartTokenizer, MBartTokenizerFast)
    ):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())
    dataset_class = Seq2SeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer, type_path="train", data_dir=data_args.data_dir, n_obs=data_args.n_train, max_target_length=data_args.max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer, type_path="val", data_dir=data_args.data_dir, n_obs=data_args.n_val, max_target_length=data_args.val_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer, type_path="test", data_dir=data_args.data_dir, n_obs=data_args.n_test, max_target_length=data_args.test_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model, args=training_args, data_args=data_args, train_dataset=train_dataset, eval_dataset=eval_dataset, data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores), compute_metrics=compute_metrics_fn, tokenizer=tokenizer, )
    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")

        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")

        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
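# A minimal invocation sketch (paths and the model name are placeholders; each
# flag maps to a dataclass field or Seq2SeqTrainingArguments attribute above):
#
#     python finetune_trainer.py \
#         --model_name_or_path t5-small \
#         --data_dir ./cnn_dm \
#         --output_dir ./output \
#         --do_train --do_eval \
#         --task summarization \
#         --predict_with_generate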
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]

if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 317
| 1
|
"""simple docstring"""
from typing import Any
class Node:
    def __init__(self, data):
        '''simple docstring'''
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        '''simple docstring'''
        self.head = None

    def print_list(self):
        '''simple docstring'''
        temp = self.head
        while temp is not None:
            print(temp.data, end=""" """)
            temp = temp.next
        print()

    def push(self, new_data):
        '''simple docstring'''
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_a, node_data_b):
        '''simple docstring'''
        if node_data_a == node_data_b:
            return
        node_a = self.head
        while node_a is not None and node_a.data != node_data_a:
            node_a = node_a.next
        node_b = self.head
        while node_b is not None and node_b.data != node_data_b:
            node_b = node_b.next
        if node_a is None or node_b is None:
            return
        # Swap the payloads only; the links between the nodes stay untouched.
        node_a.data, node_b.data = node_b.data, node_a.data
if __name__ == "__main__":
    ll = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('''After swapping''')
ll.print_list()
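    # Extra illustrative checks (not part of the original demo): swapping a value
    # with itself, or with a value that is absent, must leave the list unchanged.
    ll.swap_nodes(3, 3)
    ll.swap_nodes(1, 99)
    ll.print_list()  # still prints: 4 2 3 1 5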
| 317
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaImageProcessingTester(unittest.TestCase ):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None):
        '''simple docstring'''
        size = size if size is not None else {"""shortest_edge""": 20}
        crop_size = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
    def prepare_image_processor_dict(self):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __lowerCamelCase ( __lowercase , unittest.TestCase ):
__UpperCamelCase = MobileNetVaImageProcessor if is_vision_available() else None
    def setUp(self):
        '''simple docstring'''
        self.image_processor_tester = MobileNetVaImageProcessingTester(self)
@property
    def image_processor_dict(self):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
'''simple docstring'''
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase , """size""" ) )
self.assertTrue(hasattr(lowerCamelCase , """do_center_crop""" ) )
self.assertTrue(hasattr(lowerCamelCase , """crop_size""" ) )
    def test_image_processor_from_dict_with_kwargs(self):
'''simple docstring'''
_lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 20} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
_lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
    def test_batch_feature(self):
'''simple docstring'''
pass
    def test_call_pil(self):
'''simple docstring'''
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , Image.Image )
# Test not batched input
_lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_lowerCAmelCase = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
    def test_call_numpy(self):
'''simple docstring'''
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , np.ndarray )
# Test not batched input
_lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_lowerCAmelCase = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
    def test_call_pytorch(self):
'''simple docstring'''
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , torch.Tensor )
# Test not batched input
_lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_lowerCAmelCase = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 317
| 1
|
"""simple docstring"""
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    """simple docstring"""
    model = torch.nn.Linear(2 , 4 )
    optimizer = torch.optim.AdamW(model.parameters() , lr=1.0 )
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer , max_lr=0.01 , steps_per_epoch=2 , epochs=1 )
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
    return model, optimizer, scheduler, train_dl, valid_dl
def get_signature(model):
    """simple docstring"""
    # Collapse the weights to a single scalar so tests can cheaply detect changes.
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def load_random_weights(model):
    """simple docstring"""
    state = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
    model.load_state_dict(state )
class __lowerCamelCase ( __lowercase ):
@require_cuda
    def test_accelerator_can_be_reinstantiated(self):
        '''simple docstring'''
        accelerator = Accelerator()
        assert PartialState._shared_state["_cpu"] is False
        assert PartialState._shared_state["device"].type == "cuda"
        with self.assertRaises(ValueError):
            _ = Accelerator(cpu=True)
    def test_mutable_states(self):
        '''simple docstring'''
        accelerator = Accelerator()
        state = GradientState()
        assert state.num_steps == 1
        accelerator.gradient_accumulation_steps = 4
        assert state.num_steps == 4
        assert state.sync_gradients is True
        accelerator.sync_gradients = False
        assert state.sync_gradients is False
        GradientState._reset_state()
    def test_prepared_objects_are_referenced(self):
        '''simple docstring'''
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        (
            prepared_model,
            prepared_optimizer,
            prepared_scheduler,
            prepared_train_dl,
            prepared_valid_dl,
        ) = accelerator.prepare(model , optimizer , scheduler , train_dl , valid_dl )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
    def test_free_memory_dereferences_prepared_components(self):
        '''simple docstring'''
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model , optimizer , scheduler , train_dl , valid_dl )
        accelerator.free_memory()
        self.assertTrue(len(accelerator._models ) == 0 )
        self.assertTrue(len(accelerator._optimizers ) == 0 )
        self.assertTrue(len(accelerator._schedulers ) == 0 )
        self.assertTrue(len(accelerator._dataloaders ) == 0 )
    def test_env_var_device(self):
        '''simple docstring'''
        PartialState._reset_state()

        # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
        def noop(*args , **kwargs ):
            pass

        with patch("""torch.cuda.set_device""" , noop ), patch_environment(ACCELERATE_TORCH_DEVICE="""cuda:64""" ):
            accelerator = Accelerator()
            self.assertEqual(str(accelerator.state.device ) , """cuda:64""" )
    def test_save_load_model(self):
        '''simple docstring'''
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model , optimizer , scheduler , train_dl , valid_dl )
        model_signature = get_signature(model )
        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname )
            # make sure random weights don't match
            load_random_weights(model )
            self.assertTrue(abs(model_signature - get_signature(model ) ) > 1e-3 )
            # make sure loaded weights match
            accelerator.load_state(tmpdirname )
            self.assertTrue(abs(model_signature - get_signature(model ) ) < 1e-3 )
    def test_save_load_model_with_hooks(self):
        '''simple docstring'''
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model , optimizer , scheduler , train_dl , valid_dl )
        model_signature = get_signature(model )

        # saving hook
        def save_config(models , weights , output_dir ):
            config = {"""class_name""": models[0].__class__.__name__}
            with open(os.path.join(output_dir , """data.json""" ) , """w""" ) as f:
                json.dump(config , f )

        # loading hook
        def load_config(models , input_dir ):
            with open(os.path.join(input_dir , """data.json""" ) , """r""" ) as f:
                config = json.load(f )
            models[0].class_name = config["""class_name"""]

        save_hook = accelerator.register_save_state_pre_hook(save_config )
        load_hook = accelerator.register_load_state_pre_hook(load_config )
        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname )
            # make sure random weights don't match with hooks
            load_random_weights(model )
            self.assertTrue(abs(model_signature - get_signature(model ) ) > 1e-3 )
            # random class name to verify correct one is loaded
            model.class_name = """random"""
            # make sure loaded weights match with hooks
            accelerator.load_state(tmpdirname )
            self.assertTrue(abs(model_signature - get_signature(model ) ) < 1e-3 )
            # mode.class_name is loaded from config
            self.assertTrue(model.class_name == model.__class__.__name__ )
        # remove hooks
        save_hook.remove()
        load_hook.remove()
        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname )
            # make sure random weights don't match with hooks removed
            load_random_weights(model )
            self.assertTrue(abs(model_signature - get_signature(model ) ) > 1e-3 )
            # random class name to verify correct one is loaded
            model.class_name = """random"""
            # make sure loaded weights match with hooks removed
            accelerator.load_state(tmpdirname )
            self.assertTrue(abs(model_signature - get_signature(model ) ) < 1e-3 )
            # mode.class_name is NOT loaded from config
            self.assertTrue(model.class_name != model.__class__.__name__ )
    def test_accelerator_none(self):
        '''simple docstring'''
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = None
        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model , optimizer , scheduler , train_dl , valid_dl , dummy_obj )
        self.assertTrue(dummy_obj is None )
    def test_is_accelerate_prepared(self):
        '''simple docstring'''
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = [1, 2, 3]
        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model , optimizer , scheduler , train_dl , valid_dl , dummy_obj )
        self.assertEqual(
            getattr(dummy_obj , """_is_accelerate_prepared""" , False ) , False , """Dummy object should have `_is_accelerate_prepared` set to `True`""" , )
        self.assertEqual(
            getattr(model , """_is_accelerate_prepared""" , False ) , True , """Model is missing `_is_accelerator_prepared` or is set to `False`""" , )
        self.assertEqual(
            getattr(optimizer , """_is_accelerate_prepared""" , False ) , True , """Optimizer is missing `_is_accelerator_prepared` or is set to `False`""" , )
        self.assertEqual(
            getattr(scheduler , """_is_accelerate_prepared""" , False ) , True , """Scheduler is missing `_is_accelerator_prepared` or is set to `False`""" , )
        self.assertEqual(
            getattr(train_dl , """_is_accelerate_prepared""" , False ) , True , """Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`""" , )
        self.assertEqual(
            getattr(valid_dl , """_is_accelerate_prepared""" , False ) , True , """Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`""" , )
@slow
@require_bnb
    def test_accelerator_bnb(self):
        '''simple docstring'''
        from transformers import AutoModelForCausalLM

        model = AutoModelForCausalLM.from_pretrained(
            """EleutherAI/gpt-neo-125m""" , load_in_8bit=True , device_map={"""""": 0} , )
        accelerator = Accelerator()
        # This should work
        model = accelerator.prepare(model )
@slow
@require_bnb
    def test_accelerator_bnb_cpu_error(self):
        '''simple docstring'''
        from transformers import AutoModelForCausalLM

        accelerator = Accelerator()
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                """EleutherAI/gpt-neo-125m""" , )
        model.tie_weights()
        device_map = infer_auto_device_map(model )
        device_map["""lm_head"""] = """cpu"""
        model = AutoModelForCausalLM.from_pretrained(
            """EleutherAI/gpt-neo-125m""" , device_map=device_map , load_in_8bit=True , llm_int8_enable_fp32_cpu_offload=True )
        # This should not work and get value error
        with self.assertRaises(ValueError ):
            model = accelerator.prepare(model )
@slow
@require_bnb
@require_multi_gpu
    def test_accelerator_bnb_multi_gpu(self):
        '''simple docstring'''
        from transformers import AutoModelForCausalLM

        PartialState._shared_state = {"""distributed_type""": DistributedType.MULTI_GPU}
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                """EleutherAI/gpt-neo-125m""" , )
        model.tie_weights()
        device_map = infer_auto_device_map(model )
        device_map["""lm_head"""] = 1
        model = AutoModelForCausalLM.from_pretrained(
            """EleutherAI/gpt-neo-125m""" , load_in_8bit=True , device_map=device_map , )
        accelerator = Accelerator()
        # This should not work and get value error
        with self.assertRaises(ValueError ):
            _ = accelerator.prepare(model )
        PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
    def test_accelerator_bnb_multi_gpu_no_distributed(self):
        '''simple docstring'''
        from transformers import AutoModelForCausalLM

        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                """EleutherAI/gpt-neo-125m""" , )
        device_map = infer_auto_device_map(model )
        device_map["""lm_head"""] = 1
        model = AutoModelForCausalLM.from_pretrained(
            """EleutherAI/gpt-neo-125m""" , load_in_8bit=True , device_map=device_map , )
        accelerator = Accelerator()
        # This should work
        model = accelerator.prepare(model )
@require_cuda
    def test_accelerator_cpu_flag_prepare(self):
        '''simple docstring'''
        model = torch.nn.Linear(10 , 10 )
        optimizer = torch.optim.SGD(model.parameters() , lr=0.01 )
        accelerator = Accelerator(cpu=True )
        _ = accelerator.prepare(model )
| 317
|
"""simple docstring"""
def cocktail_shaker_sort(unsorted: list ) -> list:
    """simple docstring"""
    for i in range(len(unsorted ) - 1 , 0 , -1 ):
        swapped = False
        # Backward pass: bubble the smallest remaining value to the left.
        for j in range(i , 0 , -1 ):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        # Forward pass: bubble the largest remaining value to the right.
        for j in range(i ):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
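# Minimal sanity checks for the sort above (illustrative; the values are easy to
# verify by hand and mirror what a doctest for this function might assert).
assert cocktail_shaker_sort([4, 5, 2, 1, 2]) == [1, 2, 2, 4, 5]
assert cocktail_shaker_sort([-4, 5, 0, 1, -6]) == [-6, -4, 0, 1, 5]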
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
print(F'{cocktail_shaker_sort(unsorted) = }')
| 317
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE : Dict = {
    '''configuration_upernet''': ['''UperNetConfig'''],
}
_import_structure = SCREAMING_SNAKE_CASE
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_upernet'''] = [
        '''UperNetForSemanticSegmentation''',
        '''UperNetPreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 317
|
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool , use_xla: bool ) -> Callable:
    """simple docstring"""

    def run_func(func ):
        @wraps(func )
        def run_in_eager_mode(*args , **kwargs ):
            return func(*args , **kwargs )

        @wraps(func )
        @tf.function(experimental_compile=use_xla )
        def run_in_graph_mode(*args , **kwargs ):
            return func(*args , **kwargs )

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    """Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func
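# Usage sketch (hypothetical function and model names): the factory above returns
# a decorator that either leaves `func` eager or compiles it with tf.function
# (optionally under XLA), which is exactly how the benchmark wraps its forward
# passes further below.
#
#   @run_with_tf_optimizations(do_eager_mode=False, use_xla=True)
#   def forward_pass():
#       return model(input_ids, training=False)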
def random_input_ids(batch_size: int , sequence_length: int , vocab_size: int ) -> ["tf.Tensor"]:
    """simple docstring"""
    rng = random.Random()
    values = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
    return tf.constant(values , shape=(batch_size, sequence_length) , dtype=tf.int32 )
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = "TensorFlow"
@property
    def framework_version(self):
'''simple docstring'''
return tf.__version__
    def _inference_speed(self , model_name , batch_size , sequence_length ):
        '''simple docstring'''
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
        _inference = self._prepare_inference_func(model_name , batch_size , sequence_length )
        return self._measure_speed(_inference )
    def _train_speed(self , model_name , batch_size , sequence_length ):
        '''simple docstring'''
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
        _train = self._prepare_train_func(model_name , batch_size , sequence_length )
        return self._measure_speed(_train )
    def _inference_memory(self , model_name , batch_size , sequence_length ):
        '''simple docstring'''
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , True )
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
        _inference = self._prepare_inference_func(model_name , batch_size , sequence_length )
        return self._measure_memory(_inference )
    def _train_memory(self , model_name , batch_size , sequence_length ):
        '''simple docstring'''
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , True )
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
        _train = self._prepare_train_func(model_name , batch_size , sequence_length )
        return self._measure_memory(_train )
    def _prepare_inference_func(self , model_name , batch_size , sequence_length ):
'''simple docstring'''
_lowerCAmelCase = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
_lowerCAmelCase = (
hasattr(lowerCamelCase , """architectures""" )
and isinstance(config.architectures , lowerCamelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_lowerCAmelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
_lowerCAmelCase = __import__("""transformers""" , fromlist=[model_class] )
_lowerCAmelCase = getattr(lowerCamelCase , lowerCamelCase )
_lowerCAmelCase = model_cls(lowerCamelCase )
except ImportError:
raise ImportError(
f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
_lowerCAmelCase = TF_MODEL_MAPPING[config.__class__](lowerCamelCase )
# encoder-decoder has vocab size saved differently
_lowerCAmelCase = config.vocab_size if hasattr(lowerCamelCase , """vocab_size""" ) else config.encoder.vocab_size
_lowerCAmelCase = random_input_ids(lowerCamelCase , lowerCamelCase , lowerCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(lowerCamelCase , decoder_input_ids=lowerCamelCase , training=lowerCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(lowerCamelCase , training=lowerCamelCase )
_lowerCAmelCase = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
    def _prepare_train_func(self , model_name , batch_size , sequence_length ):
'''simple docstring'''
_lowerCAmelCase = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" )
if self.args.fpaa:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
_lowerCAmelCase = (
hasattr(lowerCamelCase , """architectures""" )
and isinstance(config.architectures , lowerCamelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_lowerCAmelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
_lowerCAmelCase = __import__("""transformers""" , fromlist=[model_class] )
_lowerCAmelCase = getattr(lowerCamelCase , lowerCamelCase )
_lowerCAmelCase = model_cls(lowerCamelCase )
except ImportError:
raise ImportError(
f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
_lowerCAmelCase = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](lowerCamelCase )
# encoder-decoder has vocab size saved differently
_lowerCAmelCase = config.vocab_size if hasattr(lowerCamelCase , """vocab_size""" ) else config.encoder.vocab_size
_lowerCAmelCase = random_input_ids(lowerCamelCase , lowerCamelCase , lowerCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
_lowerCAmelCase = model(lowerCamelCase , decoder_input_ids=lowerCamelCase , labels=lowerCamelCase , training=lowerCamelCase )[0]
_lowerCAmelCase = tf.gradients(lowerCamelCase , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
_lowerCAmelCase = model(lowerCamelCase , labels=lowerCamelCase , training=lowerCamelCase )[0]
_lowerCAmelCase = tf.gradients(lowerCamelCase , model.trainable_variables )
return gradients
_lowerCAmelCase = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
    def _measure_speed(self , lowerCamelCase ):
'''simple docstring'''
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run additional 10 times to stabilize compilation for tpu
logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" )
timeit.repeat(lowerCamelCase , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
_lowerCAmelCase = timeit.repeat(
lowerCamelCase , repeat=self.args.repeat , number=10 , )
return min(lowerCamelCase ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(f"""Doesn't fit on GPU. {e}""" )
    def _measure_memory(self , lowerCamelCase ):
'''simple docstring'''
logger.info(
"""Note that TensorFlow allocates more memory than """
"""it might need to speed up computation. """
"""The memory reported here corresponds to the memory """
"""reported by `nvidia-smi`, which can vary depending """
"""on total available memory on the GPU that is used.""" )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"""`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"""
""" consumption line by line.""" )
_lowerCAmelCase = start_memory_tracing("""transformers""" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"""Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"""
""" with `args.memory=False`""" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"""py3nvml not installed, we won't log GPU memory usage. """
"""Install py3nvml (pip install py3nvml) to log information about GPU.""" )
_lowerCAmelCase = """N/A"""
else:
logger.info(
"""Measuring total GPU usage on GPU device. Make sure to not have additional processes"""
""" running on the same GPU.""" )
# init nvml
nvml.nvmlInit()
func()
_lowerCAmelCase = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
_lowerCAmelCase = nvml.nvmlDeviceGetMemoryInfo(lowerCamelCase )
_lowerCAmelCase = meminfo.used
_lowerCAmelCase = Memory(lowerCamelCase )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"""When enabling line by line tracing, the max peak memory for CPU is inaccurate in"""
""" TensorFlow.""" )
_lowerCAmelCase = None
else:
_lowerCAmelCase = measure_peak_memory_cpu(lowerCamelCase )
_lowerCAmelCase = Memory(lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else memory_bytes
if self.args.trace_memory_line_by_line:
_lowerCAmelCase = stop_memory_tracing(lowerCamelCase )
if memory is None:
_lowerCAmelCase = summary.total
else:
_lowerCAmelCase = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(f"""Doesn't fit on GPU. {e}""" )
return "N/A", None
| 317
| 1
|
"""simple docstring"""
def hubble_parameter(
    hubble_constant: float ,
    radiation_density: float ,
    matter_density: float ,
    dark_energy: float ,
    redshift: float ,
) -> float:
    """simple docstring"""
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters ):
        raise ValueError("""All input parameters must be positive""" )
    if any(p > 1 for p in parameters[1:4] ):
        raise ValueError("""Relative densities cannot be greater than one""" )
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        # Dimensionless Friedmann factor E(z)^2.
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
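# Illustrative check (not from the original file): at redshift 0 the curvature
# term makes E(0) = 1 exactly, so the function returns the Hubble constant itself.
assert abs(hubble_parameter(hubble_constant=68.3, radiation_density=1e-4,
                            matter_density=0.3, dark_energy=0.7, redshift=0) - 68.3) < 1e-9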
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
SCREAMING_SNAKE_CASE : List[str] = 0.3
print(
hubble_parameter(
hubble_constant=6_8.3,
radiation_density=1e-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 317
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Any = {
'''transfo-xl-wt103''': '''https://huggingface.co/transfo-xl-wt103/resolve/main/config.json''',
}
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = 'transfo-xl'
__UpperCamelCase = ['mems']
__UpperCamelCase = {
'n_token': 'vocab_size',
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__(self , lowerCamelCase=267_735 , lowerCamelCase=[20_000, 40_000, 200_000] , lowerCamelCase=1_024 , lowerCamelCase=1_024 , lowerCamelCase=16 , lowerCamelCase=64 , lowerCamelCase=4_096 , lowerCamelCase=4 , lowerCamelCase=False , lowerCamelCase=18 , lowerCamelCase=1_600 , lowerCamelCase=1_000 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=0 , lowerCamelCase=-1 , lowerCamelCase=True , lowerCamelCase=0.1 , lowerCamelCase=0.0 , lowerCamelCase=True , lowerCamelCase="normal" , lowerCamelCase=0.01 , lowerCamelCase=0.01 , lowerCamelCase=0.02 , lowerCamelCase=1e-5 , lowerCamelCase=0 , **lowerCamelCase , ):
'''simple docstring'''
_lowerCAmelCase = vocab_size
_lowerCAmelCase = []
self.cutoffs.extend(lowerCamelCase )
if proj_share_all_but_first:
_lowerCAmelCase = [False] + [True] * len(self.cutoffs )
else:
_lowerCAmelCase = [False] + [False] * len(self.cutoffs )
_lowerCAmelCase = d_model
_lowerCAmelCase = d_embed
_lowerCAmelCase = d_head
_lowerCAmelCase = d_inner
_lowerCAmelCase = div_val
_lowerCAmelCase = pre_lnorm
_lowerCAmelCase = n_layer
_lowerCAmelCase = n_head
_lowerCAmelCase = mem_len
_lowerCAmelCase = same_length
_lowerCAmelCase = attn_type
_lowerCAmelCase = clamp_len
_lowerCAmelCase = sample_softmax
_lowerCAmelCase = adaptive
_lowerCAmelCase = dropout
_lowerCAmelCase = dropatt
_lowerCAmelCase = untie_r
_lowerCAmelCase = init
_lowerCAmelCase = init_range
_lowerCAmelCase = proj_init_std
_lowerCAmelCase = init_std
_lowerCAmelCase = layer_norm_epsilon
super().__init__(eos_token_id=lowerCamelCase , **lowerCamelCase )
@property
    def max_position_embeddings(self):
'''simple docstring'''
logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
    def max_position_embeddings(self , lowerCamelCase ):
'''simple docstring'''
raise NotImplementedError(
f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 317
| 1
|
"""simple docstring"""
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class __lowerCamelCase ( __lowercase ):
    def __init__(self , path_or_paths , split = None , features = None , cache_dir = None , keep_in_memory = False , streaming = False , field = None , num_proc = None , **kwargs ):
        '''simple docstring'''
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths , dict ) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir , data_files=path_or_paths , features=features , field=field , **kwargs , )
    def read(self):
        '''simple docstring'''
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
class __lowerCamelCase :
    def __init__(self , dataset , path_or_buf , batch_size = None , num_proc = None , **to_json_kwargs ):
        '''simple docstring'''
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" )
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = """utf-8"""
        self.to_json_kwargs = to_json_kwargs
    def write(self):
        '''simple docstring'''
        _ = self.to_json_kwargs.pop("""path_or_buf""" , None )
        orient = self.to_json_kwargs.pop("""orient""" , """records""" )
        lines = self.to_json_kwargs.pop("""lines""" , True if orient == """records""" else False )
        index = self.to_json_kwargs.pop("""index""" , False if orient in ["""split""", """table"""] else True )
        compression = self.to_json_kwargs.pop("""compression""" , None )
        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"""`datasets` currently does not support {compression} compression""" )
        if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
            with fsspec.open(self.path_or_buf , """wb""" , compression=compression ) as buffer:
                written = self._write(file_obj=buffer , orient=orient , lines=lines , index=index , **self.to_json_kwargs )
        else:
            if compression:
                raise NotImplementedError(
                    f"""The compression parameter is not supported when writing to a buffer, but compression={compression}"""
                    """ was passed. Please provide a local path instead.""" )
            written = self._write(
                file_obj=self.path_or_buf , orient=orient , lines=lines , index=index , **self.to_json_kwargs )
        return written
    def _batch_json(self , args ):
        '''simple docstring'''
        offset, orient, lines, index, to_json_kwargs = args
        batch = query_table(
            table=self.dataset.data , key=slice(offset , offset + self.batch_size ) , indices=self.dataset._indices , )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None , orient=orient , lines=lines , index=index , **to_json_kwargs )
        if not json_str.endswith("""\n""" ):
            json_str += "\n"
        return json_str.encode(self.encoding )
    def _write(self , file_obj , orient , lines , index , **to_json_kwargs ):
        '''simple docstring'''
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ):
                json_bytes = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
                written += file_obj.write(json_bytes )
        else:
            num_rows, batch_size = len(self.dataset ), self.batch_size
            with multiprocessing.Pool(self.num_proc ) as pool:
                for json_bytes in logging.tqdm(
                    pool.imap(
                        self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , num_rows , batch_size )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ):
                    written += file_obj.write(json_bytes )
        return written
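# Usage sketch (hypothetical variable names; assumes a `datasets.Dataset`
# instance `ds`): the writer class above (called `JsonDatasetWriter` in the
# upstream `datasets` library) streams the Arrow table to JSON Lines in
# `batch_size` chunks, optionally across `num_proc` worker processes:
#
#   JsonDatasetWriter(ds, "out.jsonl", num_proc=2).write()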
| 317
|
"""simple docstring"""
import math
def sieve(n: int ) -> list[int]:
    """simple docstring"""
    prime = []
    start = 2
    end = int(math.sqrt(n ) )  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []
    # Classic sieve on the first segment [2, sqrt(n)].
    while start <= end:
        if temp[start] is True:
            in_prime.append(start )
            for i in range(start * start , end + 1 , start ):
                temp[i] = False
        start += 1
    prime += in_prime
    low = end + 1
    high = min(2 * end , n )
    # Sieve the rest of the range segment by segment, reusing the small primes.
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each ) * each  # first multiple of `each` >= low
            if t < low:
                t += each
            for j in range(t , high + 1 , each ):
                temp[j - low] = False
        for j in range(len(temp ) ):
            if temp[j] is True:
                prime.append(j + low )
        low = high + 1
        high = min(high + end , n )
    return prime
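# Quick illustrative checks: the primes up to 10, and the well-known count of
# 25 primes below 100.
assert sieve(10) == [2, 3, 5, 7]
assert len(sieve(100)) == 25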
print(sieve(1_0**6))
| 317
| 1
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = ['image_processor', 'tokenizer']
__UpperCamelCase = 'CLIPImageProcessor'
__UpperCamelCase = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
    def __init__(self , image_processor=None , tokenizer=None , **kwargs ):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" , FutureWarning , )
            feature_extractor = kwargs.pop("""feature_extractor""" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""" )
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""" )
        super().__init__(image_processor , tokenizer )
    def __call__(self , text=None , images=None , return_tensors=None , **kwargs ):
        '''simple docstring'''
        if text is None and images is None:
            raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding["""pixel_values"""] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode(self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode(self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs )
@property
    def model_input_names(self):
        '''simple docstring'''
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 317
|
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2 as cva  # OpenCV
import numpy as np
# Parameters
OUTPUT_SIZE = (7_2_0, 1_2_8_0)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 1_0_0
LABEL_DIR = ''''''
IMG_DIR = ''''''
OUTPUT_DIR = ''''''
NUMBER_IMAGES = 2_5_0
def main() -> None:
    """simple docstring"""
    img_paths, annos = get_dataset(LABEL_DIR , IMG_DIR )
    for index in range(NUMBER_IMAGES ):
        idxs = random.sample(range(len(annos ) ) , 4 )
        new_image, new_annos, path = update_image_and_anno(
            img_paths , annos , idxs , OUTPUT_SIZE , SCALE_RANGE , filter_scale=FILTER_TINY_SCALE , )
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32 )
        file_name = path.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
        file_root = f"""{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"""
        cva.imwrite(f"""{file_root}.jpg""" , new_image , [cva.IMWRITE_JPEG_QUALITY, 85] )
        print(f"""Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}""" )
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"""{anno[0]} {x_center} {y_center} {width} {height}"""
            annos_list.append(obj )
        with open(f"""{file_root}.txt""" , """w""" ) as outfile:
            outfile.write("""\n""".join(line for line in annos_list ) )
def get_dataset(label_dir: str , img_dir: str ) -> tuple[list, list]:
    """simple docstring"""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir , """*.txt""" ) ):
        label_name = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
        with open(label_file ) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir , f"""{label_name}.jpg""" )
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("""\n""" ).split(""" """ )
            xmin = float(obj[1] ) - float(obj[3] ) / 2
            ymin = float(obj[2] ) - float(obj[4] ) / 2
            xmax = float(obj[1] ) + float(obj[3] ) / 2
            ymax = float(obj[2] ) + float(obj[4] ) / 2
            boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
        if not boxes:
            continue
        img_paths.append(img_path )
        labels.append(boxes )
    return img_paths, labels
def update_image_and_anno(all_img_list: list , all_annos: list , idxs: list[int] , output_size: tuple[int, int] , scale_range: tuple[float, float] , filter_scale: float = 0.0 , ) -> tuple[list, list, str]:
    """simple docstring"""
    output_img = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uint8 )
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1] )
    divid_point_y = int(scale_y * output_size[0] )
    new_anno = []
    path_list = []
    for i, index in enumerate(idxs ):
        path = all_img_list[index]
        path_list.append(path )
        img_annos = all_annos[index]
        img = cva.imread(path )
        if i == 0:  # top-left
            img = cva.resize(img , (divid_point_x, divid_point_y) )
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        elif i == 1:  # top-right
            img = cva.resize(img , (output_size[1] - divid_point_x, divid_point_y) )
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        elif i == 2:  # bottom-left
            img = cva.resize(img , (divid_point_x, output_size[0] - divid_point_y) )
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        else:  # bottom-right
            img = cva.resize(
                img , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
    # Remove bounding boxes smaller than the filter scale
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]
    return output_img, new_anno, path_list[0]
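# Geometry note for the mosaic above: each source image fills one quadrant of a
# canvas split at (divid_point_x, divid_point_y); a normalised box coordinate in
# the right/bottom half is mapped back into the full canvas as, for example,
# scale_x + x * (1 - scale_x).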
def random_chars(number_char: int ) -> str:
    """simple docstring"""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code ) for _ in range(number_char ) )
if __name__ == "__main__":
main()
print('''DONE ✅''')
| 317
| 1
|
"""simple docstring"""
from statistics import mean, stdev
def normalization(data: list , ndigits: int = 3 ) -> list:
    """simple docstring"""
    x_min = min(data )
    x_max = max(data )
    # normalize data: rescale into [0, 1]
    return [round((x - x_min) / (x_max - x_min) , ndigits ) for x in data]


def standardization(data: list , ndigits: int = 3 ) -> list:
    """simple docstring"""
    mu = mean(data )
    sigma = stdev(data )
    # standardize data: zero mean, unit variance
    return [round((x - mu) / (sigma) , ndigits ) for x in data]
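# Illustrative checks (the values follow directly from the definitions above):
# min-max scaling maps the smallest value to 0.0 and the largest to 1.0, and
# [10, 20, 30] has mean 20 and sample standard deviation exactly 10.
assert normalization([2, 7, 10, 20, 30, 50]) == [0.0, 0.104, 0.167, 0.375, 0.583, 1.0]
assert standardization([10, 20, 30]) == [-1.0, 0.0, 1.0]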
| 317
|
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser ):
    """simple docstring"""
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser )


def pytest_terminal_summary(terminalreporter ):
    """simple docstring"""
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("""--make-reports""" )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
| 317
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Tuple = {
'''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = 'gpt_neo'
__UpperCamelCase = ['past_key_values']
__UpperCamelCase = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__(self , lowerCamelCase=50_257 , lowerCamelCase=2_048 , lowerCamelCase=2_048 , lowerCamelCase=24 , lowerCamelCase=[[["global", "local"], 12]] , lowerCamelCase=16 , lowerCamelCase=None , lowerCamelCase=256 , lowerCamelCase="gelu_new" , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=0.1 , lowerCamelCase=1e-5 , lowerCamelCase=0.02 , lowerCamelCase=True , lowerCamelCase=50_256 , lowerCamelCase=50_256 , **lowerCamelCase , ):
'''simple docstring'''
_lowerCAmelCase = vocab_size
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_layers
_lowerCAmelCase = num_heads
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = window_size
_lowerCAmelCase = activation_function
_lowerCAmelCase = resid_dropout
_lowerCAmelCase = embed_dropout
_lowerCAmelCase = attention_dropout
_lowerCAmelCase = classifier_dropout
_lowerCAmelCase = layer_norm_epsilon
_lowerCAmelCase = initializer_range
_lowerCAmelCase = use_cache
_lowerCAmelCase = bos_token_id
_lowerCAmelCase = eos_token_id
_lowerCAmelCase = attention_types
_lowerCAmelCase = self.expand_attention_types_params(lowerCamelCase )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.attention_layers)` == `config.num_layers` """
f"""but is `len(config.attention_layers) = {len(self.attention_layers )}`, """
f"""`config.num_layers = {self.num_layers}`. """
"""`config.attention_layers` is prepared using `config.attention_types`. """
"""Please verify the value of `config.attention_types` argument.""" )
super().__init__(bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , **lowerCamelCase )
    @staticmethod
    def expand_attention_types_params(attention_types ):
        '''simple docstring'''
        attentions = []
        for item in attention_types:
            for _ in range(item[1] ):
                attentions.extend(item[0] )
        return attentions
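# Example (using the default config value above): attention_types
# [[["global", "local"], 12]] expands to 24 per-layer entries that alternate
# "global", "local", "global", "local", ... - one entry per hidden layer.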
def custom_unfold(input , dimension , size , step ):
    """simple docstring"""
    import torch

    shape = input.size()
    rank = len(shape )
    sizedim = shape[dimension]
    low_indices = torch.arange(0 , sizedim , step )
    min_length = torch.div(sizedim - size , step , rounding_mode="""floor""" ) + 1
    indices = torch.arange(size ) + low_indices[:min_length][:, None]
    s = [slice(None )] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0 , rank + 1 ) )
    perm.append(perm.pop(dimension + 1 ) )
    return sliced.permute(perm )
def custom_get_block_length_and_num_blocks(seq_length , window_size ):
    """simple docstring"""
    import torch

    candidates = torch.arange(1 , window_size )
    remainders = torch.remainder(seq_length , candidates )
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors )
    return largest_divisor, torch.div(seq_length , largest_divisor , rounding_mode="""floor""" )
class __lowerCamelCase ( __lowercase ):
@property
    def inputs(self):
        '''simple docstring'''
        common_inputs = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction="""inputs""" )
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """past_sequence + sequence"""}
        else:
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """sequence"""}
        return common_inputs
@property
    def num_attention_heads(self):
'''simple docstring'''
return self._config.num_heads
    def generate_dummy_inputs(self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ):
        '''simple docstring'''
        common_inputs = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
            else:
                import torch

                batch, seqlen = common_inputs["""input_ids"""].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["""past_key_values"""] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs["""attention_mask"""] = common_inputs["""attention_mask"""]
        if self.use_past:
            mask_dtype = ordered_inputs["""attention_mask"""].dtype
            ordered_inputs["""attention_mask"""] = torch.cat(
                [ordered_inputs["""attention_mask"""], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
        return ordered_inputs
@property
    def default_onnx_opset(self):
'''simple docstring'''
return 13
| 317
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
SCREAMING_SNAKE_CASE : Optional[Any] = {
'''Acehnese Arabic''': '''ace_Arab''',
'''Acehnese Latin''': '''ace_Latn''',
'''Mesopotamian Arabic''': '''acm_Arab''',
'''Ta\'izzi-Adeni Arabic''': '''acq_Arab''',
'''Tunisian Arabic''': '''aeb_Arab''',
'''Afrikaans''': '''afr_Latn''',
'''South Levantine Arabic''': '''ajp_Arab''',
'''Akan''': '''aka_Latn''',
'''Amharic''': '''amh_Ethi''',
'''North Levantine Arabic''': '''apc_Arab''',
'''Modern Standard Arabic''': '''arb_Arab''',
'''Modern Standard Arabic Romanized''': '''arb_Latn''',
'''Najdi Arabic''': '''ars_Arab''',
'''Moroccan Arabic''': '''ary_Arab''',
'''Egyptian Arabic''': '''arz_Arab''',
'''Assamese''': '''asm_Beng''',
'''Asturian''': '''ast_Latn''',
'''Awadhi''': '''awa_Deva''',
'''Central Aymara''': '''ayr_Latn''',
'''South Azerbaijani''': '''azb_Arab''',
'''North Azerbaijani''': '''azj_Latn''',
'''Bashkir''': '''bak_Cyrl''',
'''Bambara''': '''bam_Latn''',
'''Balinese''': '''ban_Latn''',
'''Belarusian''': '''bel_Cyrl''',
'''Bemba''': '''bem_Latn''',
'''Bengali''': '''ben_Beng''',
'''Bhojpuri''': '''bho_Deva''',
'''Banjar Arabic''': '''bjn_Arab''',
'''Banjar Latin''': '''bjn_Latn''',
'''Standard Tibetan''': '''bod_Tibt''',
'''Bosnian''': '''bos_Latn''',
'''Buginese''': '''bug_Latn''',
'''Bulgarian''': '''bul_Cyrl''',
'''Catalan''': '''cat_Latn''',
'''Cebuano''': '''ceb_Latn''',
'''Czech''': '''ces_Latn''',
'''Chokwe''': '''cjk_Latn''',
'''Central Kurdish''': '''ckb_Arab''',
'''Crimean Tatar''': '''crh_Latn''',
'''Welsh''': '''cym_Latn''',
'''Danish''': '''dan_Latn''',
'''German''': '''deu_Latn''',
'''Southwestern Dinka''': '''dik_Latn''',
'''Dyula''': '''dyu_Latn''',
'''Dzongkha''': '''dzo_Tibt''',
'''Greek''': '''ell_Grek''',
'''English''': '''eng_Latn''',
'''Esperanto''': '''epo_Latn''',
'''Estonian''': '''est_Latn''',
'''Basque''': '''eus_Latn''',
'''Ewe''': '''ewe_Latn''',
'''Faroese''': '''fao_Latn''',
'''Fijian''': '''fij_Latn''',
'''Finnish''': '''fin_Latn''',
'''Fon''': '''fon_Latn''',
'''French''': '''fra_Latn''',
'''Friulian''': '''fur_Latn''',
'''Nigerian Fulfulde''': '''fuv_Latn''',
'''Scottish Gaelic''': '''gla_Latn''',
'''Irish''': '''gle_Latn''',
'''Galician''': '''glg_Latn''',
'''Guarani''': '''grn_Latn''',
'''Gujarati''': '''guj_Gujr''',
'''Haitian Creole''': '''hat_Latn''',
'''Hausa''': '''hau_Latn''',
'''Hebrew''': '''heb_Hebr''',
'''Hindi''': '''hin_Deva''',
'''Chhattisgarhi''': '''hne_Deva''',
'''Croatian''': '''hrv_Latn''',
'''Hungarian''': '''hun_Latn''',
'''Armenian''': '''hye_Armn''',
'''Igbo''': '''ibo_Latn''',
'''Ilocano''': '''ilo_Latn''',
'''Indonesian''': '''ind_Latn''',
'''Icelandic''': '''isl_Latn''',
'''Italian''': '''ita_Latn''',
'''Javanese''': '''jav_Latn''',
'''Japanese''': '''jpn_Jpan''',
'''Kabyle''': '''kab_Latn''',
'''Jingpho''': '''kac_Latn''',
'''Kamba''': '''kam_Latn''',
'''Kannada''': '''kan_Knda''',
'''Kashmiri Arabic''': '''kas_Arab''',
'''Kashmiri Devanagari''': '''kas_Deva''',
'''Georgian''': '''kat_Geor''',
'''Central Kanuri Arabic''': '''knc_Arab''',
'''Central Kanuri Latin''': '''knc_Latn''',
'''Kazakh''': '''kaz_Cyrl''',
'''Kabiyè''': '''kbp_Latn''',
'''Kabuverdianu''': '''kea_Latn''',
'''Khmer''': '''khm_Khmr''',
'''Kikuyu''': '''kik_Latn''',
'''Kinyarwanda''': '''kin_Latn''',
'''Kyrgyz''': '''kir_Cyrl''',
'''Kimbundu''': '''kmb_Latn''',
'''Northern Kurdish''': '''kmr_Latn''',
'''Kikongo''': '''kon_Latn''',
'''Korean''': '''kor_Hang''',
'''Lao''': '''lao_Laoo''',
'''Ligurian''': '''lij_Latn''',
'''Limburgish''': '''lim_Latn''',
'''Lingala''': '''lin_Latn''',
'''Lithuanian''': '''lit_Latn''',
'''Lombard''': '''lmo_Latn''',
'''Latgalian''': '''ltg_Latn''',
'''Luxembourgish''': '''ltz_Latn''',
'''Luba-Kasai''': '''lua_Latn''',
'''Ganda''': '''lug_Latn''',
'''Luo''': '''luo_Latn''',
'''Mizo''': '''lus_Latn''',
'''Standard Latvian''': '''lvs_Latn''',
'''Magahi''': '''mag_Deva''',
'''Maithili''': '''mai_Deva''',
'''Malayalam''': '''mal_Mlym''',
'''Marathi''': '''mar_Deva''',
    '''Minangkabau Arabic''': '''min_Arab''',
'''Minangkabau Latin''': '''min_Latn''',
'''Macedonian''': '''mkd_Cyrl''',
'''Plateau Malagasy''': '''plt_Latn''',
'''Maltese''': '''mlt_Latn''',
'''Meitei Bengali''': '''mni_Beng''',
'''Halh Mongolian''': '''khk_Cyrl''',
'''Mossi''': '''mos_Latn''',
'''Maori''': '''mri_Latn''',
'''Burmese''': '''mya_Mymr''',
'''Dutch''': '''nld_Latn''',
'''Norwegian Nynorsk''': '''nno_Latn''',
'''Norwegian Bokmål''': '''nob_Latn''',
'''Nepali''': '''npi_Deva''',
'''Northern Sotho''': '''nso_Latn''',
'''Nuer''': '''nus_Latn''',
'''Nyanja''': '''nya_Latn''',
'''Occitan''': '''oci_Latn''',
'''West Central Oromo''': '''gaz_Latn''',
'''Odia''': '''ory_Orya''',
'''Pangasinan''': '''pag_Latn''',
'''Eastern Panjabi''': '''pan_Guru''',
'''Papiamento''': '''pap_Latn''',
'''Western Persian''': '''pes_Arab''',
'''Polish''': '''pol_Latn''',
'''Portuguese''': '''por_Latn''',
'''Dari''': '''prs_Arab''',
'''Southern Pashto''': '''pbt_Arab''',
'''Ayacucho Quechua''': '''quy_Latn''',
'''Romanian''': '''ron_Latn''',
'''Rundi''': '''run_Latn''',
'''Russian''': '''rus_Cyrl''',
'''Sango''': '''sag_Latn''',
'''Sanskrit''': '''san_Deva''',
'''Santali''': '''sat_Olck''',
'''Sicilian''': '''scn_Latn''',
'''Shan''': '''shn_Mymr''',
'''Sinhala''': '''sin_Sinh''',
'''Slovak''': '''slk_Latn''',
'''Slovenian''': '''slv_Latn''',
'''Samoan''': '''smo_Latn''',
'''Shona''': '''sna_Latn''',
'''Sindhi''': '''snd_Arab''',
'''Somali''': '''som_Latn''',
'''Southern Sotho''': '''sot_Latn''',
'''Spanish''': '''spa_Latn''',
'''Tosk Albanian''': '''als_Latn''',
'''Sardinian''': '''srd_Latn''',
'''Serbian''': '''srp_Cyrl''',
'''Swati''': '''ssw_Latn''',
'''Sundanese''': '''sun_Latn''',
'''Swedish''': '''swe_Latn''',
'''Swahili''': '''swh_Latn''',
'''Silesian''': '''szl_Latn''',
'''Tamil''': '''tam_Taml''',
'''Tatar''': '''tat_Cyrl''',
'''Telugu''': '''tel_Telu''',
'''Tajik''': '''tgk_Cyrl''',
'''Tagalog''': '''tgl_Latn''',
'''Thai''': '''tha_Thai''',
'''Tigrinya''': '''tir_Ethi''',
'''Tamasheq Latin''': '''taq_Latn''',
'''Tamasheq Tifinagh''': '''taq_Tfng''',
'''Tok Pisin''': '''tpi_Latn''',
'''Tswana''': '''tsn_Latn''',
'''Tsonga''': '''tso_Latn''',
'''Turkmen''': '''tuk_Latn''',
'''Tumbuka''': '''tum_Latn''',
'''Turkish''': '''tur_Latn''',
'''Twi''': '''twi_Latn''',
'''Central Atlas Tamazight''': '''tzm_Tfng''',
'''Uyghur''': '''uig_Arab''',
'''Ukrainian''': '''ukr_Cyrl''',
'''Umbundu''': '''umb_Latn''',
'''Urdu''': '''urd_Arab''',
'''Northern Uzbek''': '''uzn_Latn''',
'''Venetian''': '''vec_Latn''',
'''Vietnamese''': '''vie_Latn''',
'''Waray''': '''war_Latn''',
'''Wolof''': '''wol_Latn''',
'''Xhosa''': '''xho_Latn''',
'''Eastern Yiddish''': '''ydd_Hebr''',
'''Yoruba''': '''yor_Latn''',
'''Yue Chinese''': '''yue_Hant''',
'''Chinese Simplified''': '''zho_Hans''',
'''Chinese Traditional''': '''zho_Hant''',
'''Standard Malay''': '''zsm_Latn''',
'''Zulu''': '''zul_Latn''',
}
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = 'facebook/nllb-200-distilled-600M'
__UpperCamelCase = (
'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
    'which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in '
'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
)
__UpperCamelCase = 'translator'
__UpperCamelCase = AutoTokenizer
__UpperCamelCase = AutoModelForSeqaSeqLM
__UpperCamelCase = LANGUAGE_CODES
__UpperCamelCase = ['text', 'text', 'text']
__UpperCamelCase = ['text']
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if src_lang not in self.lang_to_code:
raise ValueError(f"""{src_lang} is not a supported language.""" )
if tgt_lang not in self.lang_to_code:
raise ValueError(f"""{tgt_lang} is not a supported language.""" )
_lowerCAmelCase = self.lang_to_code[src_lang]
_lowerCAmelCase = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
lowerCamelCase , return_tensors="""pt""" , src_lang=lowerCamelCase , tgt_lang=lowerCamelCase )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
return self.model.generate(**lowerCamelCase )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=lowerCamelCase )
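# Hedged usage sketch of the tool above (its class name is anonymized, so
# `TranslationTool` is a stand-in). Calling it downloads the NLLB checkpoint,
# hence the example is shown commented out:
#
#   tool = TranslationTool()
#   print(tool("Bonjour tout le monde", src_lang="French", tgt_lang="English"))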
| 317
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
SCREAMING_SNAKE_CASE : List[Any] = {
'''configuration_gpt_neox_japanese''': ['''GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXJapaneseConfig'''],
'''tokenization_gpt_neox_japanese''': ['''GPTNeoXJapaneseTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : List[str] = [
'''GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXJapaneseForCausalLM''',
'''GPTNeoXJapaneseLayer''',
'''GPTNeoXJapaneseModel''',
'''GPTNeoXJapanesePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
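# Design note with a minimal sketch (an assumption: simplified, not the real
# _LazyModule) of the lazy-import idea above: attribute access triggers the
# actual import, so heavy torch-backed submodules load only when used.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, symbols in self._import_structure.items():
            if attr in symbols:
                return getattr(importlib.import_module(submodule), attr)
        raise AttributeError(attr)

assert LazyModule("demo", {"json": ["dumps"]}).dumps({"a": 1}) == '{"a": 1}'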
| 317
|
"""simple docstring"""
from math import isqrt
def __UpperCAmelCase ( snake_case_ : int ) -> list[int]:
"""simple docstring"""
_lowerCAmelCase = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , snake_case_ , snake_case_ ):
_lowerCAmelCase = False
return [i for i in range(2 , snake_case_ ) if is_prime[i]]
def __UpperCAmelCase ( snake_case_ : int = 10**8 ) -> int:
"""simple docstring"""
_lowerCAmelCase = calculate_prime_numbers(max_number // 2 )
_lowerCAmelCase = 0
_lowerCAmelCase = 0
_lowerCAmelCase = len(snake_case_ ) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
if __name__ == "__main__":
print(F'{solution() = }')
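# De-obfuscated sketch (names are mine) of the two-pointer count above: with
# primes sorted ascending, fix the smaller factor at `left`, pull `right` down
# until the product drops below the limit, and then every prime in
# [left, right] pairs with primes[left].
def count_products_below(limit: int, primes: list[int]) -> int:
    count, left, right = 0, 0, len(primes) - 1
    while left <= right:
        while primes[left] * primes[right] >= limit:
            right -= 1
        count += right - left + 1
        left += 1
    return count

# 4, 6, 9, 10, 14, 15, 21, 22, 25, 26 are the ten products p*q < 30 with p <= q
assert count_products_below(30, [2, 3, 5, 7, 11, 13]) == 10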
| 317
| 1
|
"""simple docstring"""
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCamelCase ( __lowercase , unittest.TestCase ):
__UpperCamelCase = FunnelTokenizer
__UpperCamelCase = FunnelTokenizerFast
__UpperCamelCase = True
__UpperCamelCase = True
def A__ (self ):
'''simple docstring'''
super().setUp()
_lowerCAmelCase = [
"""<unk>""",
"""<cls>""",
"""<sep>""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
_lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def A__ (self , **lowerCamelCase ):
'''simple docstring'''
return FunnelTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase )
def A__ (self , **lowerCamelCase ):
'''simple docstring'''
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **lowerCamelCase )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = """UNwant\u00E9d,running"""
_lowerCAmelCase = """unwanted, running"""
return input_text, output_text
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.tokenizer_class(self.vocab_file )
_lowerCAmelCase = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(lowerCamelCase , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase ) , [7, 4, 5, 10, 8, 9] )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.get_tokenizers(do_lower_case=lowerCamelCase )
for tokenizer in tokenizers:
_lowerCAmelCase = tokenizer("""UNwant\u00E9d,running""" )
_lowerCAmelCase = len(inputs["""input_ids"""] ) - 1
self.assertListEqual(inputs["""token_type_ids"""] , [2] + [0] * sentence_len )
_lowerCAmelCase = tokenizer("""UNwant\u00E9d,running""" , """UNwant\u00E9d,running""" )
self.assertListEqual(inputs["""token_type_ids"""] , [2] + [0] * sentence_len + [1] * sentence_len )
| 317
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = (
        'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image. '
        'It takes two arguments named `image` which should be the original image, and `label` which should be a text '
        'describing the elements that should be identified in the segmentation mask. The tool returns the mask.'
)
__UpperCamelCase = 'CIDAS/clipseg-rd64-refined'
__UpperCamelCase = 'image_segmenter'
__UpperCamelCase = CLIPSegForImageSegmentation
__UpperCamelCase = ['image', 'text']
__UpperCamelCase = ['image']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""vision"""] )
super().__init__(*lowerCamelCase , **lowerCamelCase )
def A__ (self , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
return self.pre_processor(text=[label] , images=[image] , padding=lowerCamelCase , return_tensors="""pt""" )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
with torch.no_grad():
_lowerCAmelCase = self.model(**lowerCamelCase ).logits
return logits
def A__ (self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = outputs.cpu().detach().numpy()
_lowerCAmelCase = 0
_lowerCAmelCase = 1
return Image.fromarray((array * 255).astype(np.uinta ) )
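# Hedged usage sketch for the tool above (its class name is anonymized, so
# `ImageSegmentationTool` is a stand-in; running it downloads the CLIPSeg
# checkpoint, hence commented out):
#
#   from PIL import Image
#   tool = ImageSegmentationTool()
#   mask = tool(Image.open("photo.jpg"), "a cat")  # returns a PIL mask image
#   mask.save("cat_mask.png")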
| 317
| 1
|
"""simple docstring"""
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
SCREAMING_SNAKE_CASE : Tuple = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('''''', '''|''', '''|'''),
datarow=DataRow('''''', '''|''', '''|'''),
padding=1,
with_header_hide=None,
)
SCREAMING_SNAKE_CASE : List[str] = []
SCREAMING_SNAKE_CASE : List[str] = []
SCREAMING_SNAKE_CASE : List[Any] = {'''type''': '''section''', '''text''': {'''type''': '''plain_text''', '''text''': '''No failed tests! 🤗''', '''emoji''': True}}
SCREAMING_SNAKE_CASE : str = [
{
'''type''': '''header''',
'''text''': {
'''type''': '''plain_text''',
'''text''': F'🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results',
'''emoji''': True,
},
}
]
SCREAMING_SNAKE_CASE : Optional[int] = 0
for log in Path().glob('''*.log'''):
SCREAMING_SNAKE_CASE : Tuple = 0
with open(log, '''r''') as f:
for line in f:
SCREAMING_SNAKE_CASE : Union[str, Any] = json.loads(line)
if line.get('''nodeid''', '''''') != "":
SCREAMING_SNAKE_CASE : Optional[int] = line['''nodeid''']
if line.get('''duration''', None) is not None:
SCREAMING_SNAKE_CASE : Any = F'{line["duration"]:.4f}'
if line.get('''outcome''', '''''') == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split('''_''')[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
SCREAMING_SNAKE_CASE : List[Any] = []
log.unlink()
SCREAMING_SNAKE_CASE : Optional[Any] = ''''''
SCREAMING_SNAKE_CASE : Optional[int] = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
SCREAMING_SNAKE_CASE : Any = []
SCREAMING_SNAKE_CASE : Optional[Any] = {}
for test in failed_tests:
SCREAMING_SNAKE_CASE : List[Any] = test[0].split('''::''')
SCREAMING_SNAKE_CASE : Dict = data[0].split('''/''')[-1]
if data[0] not in filesafailed:
SCREAMING_SNAKE_CASE : Optional[Any] = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
SCREAMING_SNAKE_CASE : List[str] = [test[0] for test in failed_table]
SCREAMING_SNAKE_CASE : int = list(set(files))
# Count number of instances in failed_tests
SCREAMING_SNAKE_CASE : List[str] = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
SCREAMING_SNAKE_CASE : Optional[int] = tabulate(
table,
headers=['''Test Location''', '''Num Failed'''],
tablefmt=hf_table_format,
stralign='''right''',
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3_0_0_0:
SCREAMING_SNAKE_CASE : str = '''Too many failed tests, please see the full report in the Action results.'''
SCREAMING_SNAKE_CASE : List[str] = len(err) + 1_0
SCREAMING_SNAKE_CASE : Optional[Any] = message[: 3_0_0_0 - offset] + F'\n...\n```\n{err}'
print(F'### {message}')
else:
SCREAMING_SNAKE_CASE : Optional[Any] = '''No failed tests! 🤗'''
print(F'## {message}')
payload.append(no_error_payload)
if os.environ.get('''TEST_TYPE''', '''''') != "":
from slack_sdk import WebClient
SCREAMING_SNAKE_CASE : List[Any] = WebClient(token=os.environ['''SLACK_API_TOKEN'''])
if message != "No failed tests! 🤗":
SCREAMING_SNAKE_CASE : Any = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': message,
},
}
payload.append(md_report)
SCREAMING_SNAKE_CASE : Optional[int] = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': '''*For more details:*''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''Check Action results''',
'''emoji''': True,
},
'''url''': F'https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
payload.append(action_button)
SCREAMING_SNAKE_CASE : List[str] = {
'''type''': '''context''',
'''elements''': [
{
'''type''': '''plain_text''',
'''text''': F'Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}',
}
],
}
payload.append(date_report)
SCREAMING_SNAKE_CASE : List[str] = client.chat_postMessage(channel='''#accelerate-ci-daily''', text=message, blocks=payload)
SCREAMING_SNAKE_CASE : Dict = response.data['''ts''']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
SCREAMING_SNAKE_CASE : List[Any] = ''''''
for i, row in enumerate(test_failures):
if row[0] != test_class:
SCREAMING_SNAKE_CASE : Tuple = row[0]
else:
SCREAMING_SNAKE_CASE : List[Any] = ''''''
SCREAMING_SNAKE_CASE : Optional[Any] = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': F'Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```',
},
}
client.chat_postMessage(
channel='''#accelerate-ci-daily''',
thread_ts=ts,
blocks=[payload],
)
| 317
|
"""simple docstring"""
from __future__ import annotations
import queue
class __lowerCamelCase :
def __init__(self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = data
_lowerCAmelCase = None
_lowerCAmelCase = None
def __UpperCAmelCase ( ) -> TreeNode:
"""simple docstring"""
print("""\n********Press N to stop entering at any point of time********\n""" )
_lowerCAmelCase = input("""Enter the value of the root node: """ ).strip().lower()
_lowerCAmelCase = queue.Queue()
_lowerCAmelCase = TreeNode(int(snake_case_ ) )
q.put(snake_case_ )
while not q.empty():
_lowerCAmelCase = q.get()
_lowerCAmelCase = F"""Enter the left node of {node_found.data}: """
_lowerCAmelCase = input(snake_case_ ).strip().lower() or """n"""
if check == "n":
return tree_node
_lowerCAmelCase = TreeNode(int(snake_case_ ) )
_lowerCAmelCase = left_node
q.put(snake_case_ )
_lowerCAmelCase = F"""Enter the right node of {node_found.data}: """
_lowerCAmelCase = input(snake_case_ ).strip().lower() or """n"""
if check == "n":
return tree_node
_lowerCAmelCase = TreeNode(int(snake_case_ ) )
_lowerCAmelCase = right_node
q.put(snake_case_ )
    raise RuntimeError("queue unexpectedly exhausted while building the tree")
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
print(node.data , end=""",""" )
pre_order(node.left )
pre_order(node.right )
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
in_order(node.left )
print(node.data , end=""",""" )
in_order(node.right )
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=""",""" )
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
_lowerCAmelCase = queue.Queue()
q.put(snake_case_ )
while not q.empty():
_lowerCAmelCase = q.get()
print(node_dequeued.data , end=""",""" )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
_lowerCAmelCase = queue.Queue()
q.put(snake_case_ )
while not q.empty():
_lowerCAmelCase = []
while not q.empty():
_lowerCAmelCase = q.get()
print(node_dequeued.data , end=""",""" )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(snake_case_ )
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
_lowerCAmelCase = []
_lowerCAmelCase = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=""",""" )
stack.append(snake_case_ )
_lowerCAmelCase = n.left
# end of while means current node doesn't have left child
_lowerCAmelCase = stack.pop()
# start to traverse its right child
_lowerCAmelCase = n.right
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
_lowerCAmelCase = []
_lowerCAmelCase = node
while n or stack:
while n:
stack.append(snake_case_ )
_lowerCAmelCase = n.left
_lowerCAmelCase = stack.pop()
print(n.data , end=""",""" )
_lowerCAmelCase = n.right
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
_lowerCAmelCase , _lowerCAmelCase = [], []
_lowerCAmelCase = node
stacka.append(snake_case_ )
while stacka: # to find the reversed order of post order, store it in stack2
_lowerCAmelCase = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(snake_case_ )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end=""",""" )
def __UpperCAmelCase ( snake_case_ : str = "" , snake_case_ : int=50 , snake_case_ : Dict="*" ) -> str:
"""simple docstring"""
if not s:
return "\n" + width * char
_lowerCAmelCase , _lowerCAmelCase = divmod(width - len(snake_case_ ) - 2 , 2 )
return F"""{left * char} {s} {(left + extra) * char}"""
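# Quick check (a sketch) of the banner helper above: with the default width of
# 50 and fill char "*", a 22-character title gets 13 fill characters per side.
assert len("*" * 13 + " Binary Tree Traversals " + "*" * 13) == 50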
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('''Binary Tree Traversals'''))
SCREAMING_SNAKE_CASE : TreeNode = build_tree()
print(prompt('''Pre Order Traversal'''))
pre_order(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal'''))
in_order(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal'''))
post_order(node)
print(prompt() + '''\n''')
print(prompt('''Level Order Traversal'''))
level_order(node)
print(prompt() + '''\n''')
print(prompt('''Actual Level Order Traversal'''))
level_order_actual(node)
print('''*''' * 5_0 + '''\n''')
print(prompt('''Pre Order Traversal - Iteration Version'''))
pre_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal - Iteration Version'''))
in_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal - Iteration Version'''))
post_order_iter(node)
print(prompt())
| 317
| 1
|
"""simple docstring"""
from math import sqrt
def __UpperCAmelCase ( snake_case_ : int ) -> bool:
"""simple docstring"""
assert isinstance(snake_case_ , snake_case_ ) and (
number >= 0
), "'number' must been an int and positive"
_lowerCAmelCase = True
# 0 and 1 are none primes.
if number <= 1:
_lowerCAmelCase = False
for divisor in range(2 , int(round(sqrt(snake_case_ ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
_lowerCAmelCase = False
break
# precondition
assert isinstance(snake_case_ , snake_case_ ), "'status' must been from type bool"
return status
def __UpperCAmelCase ( snake_case_ : str ) -> Dict:
"""simple docstring"""
assert isinstance(snake_case_ , snake_case_ ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
_lowerCAmelCase = list(range(2 , n + 1 ) )
    _lowerCAmelCase = [] # this list will be returned.
    # actual sieve of Eratosthenes
for i in range(len(snake_case_ ) ):
for j in range(i + 1 , len(snake_case_ ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
_lowerCAmelCase = 0
# filters actual prime numbers.
_lowerCAmelCase = [x for x in begin_list if x != 0]
# precondition
assert isinstance(snake_case_ , snake_case_ ), "'ans' must been from type list"
return ans
def __UpperCAmelCase ( snake_case_ : str ) -> Union[str, Any]:
"""simple docstring"""
assert isinstance(snake_case_ , snake_case_ ) and (n > 2), "'N' must been an int and > 2"
_lowerCAmelCase = []
    # iterates over all numbers from 2 up to N (inclusive);
    # if a number is prime, appends it to the list 'ans'
for number in range(2 , n + 1 ):
if is_prime(snake_case_ ):
ans.append(snake_case_ )
# precondition
assert isinstance(snake_case_ , snake_case_ ), "'ans' must been from type list"
return ans
def __UpperCAmelCase ( snake_case_ : str ) -> Tuple:
"""simple docstring"""
assert isinstance(snake_case_ , snake_case_ ) and number >= 0, "'number' must been an int and >= 0"
    _lowerCAmelCase = [] # this list will be returned by the function.
# potential prime number factors.
_lowerCAmelCase = 2
_lowerCAmelCase = number
if number == 0 or number == 1:
ans.append(snake_case_ )
    # if 'number' is not prime, build its prime factorization
elif not is_prime(snake_case_ ):
while quotient != 1:
if is_prime(snake_case_ ) and (quotient % factor == 0):
ans.append(snake_case_ )
quotient /= factor
else:
factor += 1
else:
ans.append(snake_case_ )
# precondition
assert isinstance(snake_case_ , snake_case_ ), "'ans' must been from type list"
return ans
def __UpperCAmelCase ( snake_case_ : Tuple ) -> Dict:
"""simple docstring"""
assert isinstance(snake_case_ , snake_case_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
_lowerCAmelCase = 0
# prime factorization of 'number'
_lowerCAmelCase = prime_factorization(snake_case_ )
_lowerCAmelCase = max(snake_case_ )
# precondition
assert isinstance(snake_case_ , snake_case_ ), "'ans' must been from type int"
return ans
def __UpperCAmelCase ( snake_case_ : Any ) -> Optional[Any]:
"""simple docstring"""
assert isinstance(snake_case_ , snake_case_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
_lowerCAmelCase = 0
# prime factorization of 'number'
_lowerCAmelCase = prime_factorization(snake_case_ )
_lowerCAmelCase = min(snake_case_ )
# precondition
assert isinstance(snake_case_ , snake_case_ ), "'ans' must been from type int"
return ans
def __UpperCAmelCase ( snake_case_ : Tuple ) -> Dict:
"""simple docstring"""
assert isinstance(snake_case_ , snake_case_ ), "'number' must been an int"
    assert isinstance(number % 2 == 0 , snake_case_ ), "compare must been from type bool"
return number % 2 == 0
def __UpperCAmelCase ( snake_case_ : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
assert isinstance(snake_case_ , snake_case_ ), "'number' must been an int"
    assert isinstance(number % 2 != 0 , snake_case_ ), "compare must been from type bool"
return number % 2 != 0
def __UpperCAmelCase ( snake_case_ : Dict ) -> List[str]:
"""simple docstring"""
assert (
isinstance(snake_case_ , snake_case_ ) and (number > 2) and is_even(snake_case_ )
), "'number' must been an int, even and > 2"
    _lowerCAmelCase = [] # this list will be returned
# creates a list of prime numbers between 2 up to 'number'
_lowerCAmelCase = get_prime_numbers(snake_case_ )
_lowerCAmelCase = len(snake_case_ )
# run variable for while-loops.
_lowerCAmelCase = 0
_lowerCAmelCase = None
    # exit variable, for breaking out of the loops
_lowerCAmelCase = True
while i < len_pn and loop:
_lowerCAmelCase = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
_lowerCAmelCase = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(snake_case_ , snake_case_ )
and (len(snake_case_ ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
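# Worked example (a sketch) of the Goldbach split above: scanning prime pairs
# with the smaller factor increasing, 28 first decomposes as 5 + 23.
primes = [2, 3, 5, 7, 11, 13, 17, 19, 23]
pair = next((p, q) for i, p in enumerate(primes) for q in primes[i + 1 :] if p + q == 28)
assert pair == (5, 23)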
def __UpperCAmelCase ( snake_case_ : Optional[Any] , snake_case_ : Tuple ) -> Any:
"""simple docstring"""
assert (
isinstance(snake_case_ , snake_case_ )
and isinstance(snake_case_ , snake_case_ )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
_lowerCAmelCase = 0
while numbera != 0:
_lowerCAmelCase = numbera % numbera
_lowerCAmelCase = numbera
_lowerCAmelCase = rest
# precondition
assert isinstance(snake_case_ , snake_case_ ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
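# De-obfuscated sketch (names are mine) of the Euclidean algorithm the helper
# above implements; anonymization collapsed its distinct variables into one.
def euclid_gcd(a: int, b: int) -> int:
    while b != 0:
        a, b = b, a % b
    return a

assert euclid_gcd(12, 18) == 6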
def __UpperCAmelCase ( snake_case_ : Union[str, Any] , snake_case_ : Any ) -> int:
"""simple docstring"""
assert (
isinstance(snake_case_ , snake_case_ )
and isinstance(snake_case_ , snake_case_ )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
    _lowerCAmelCase = 1 # actual answer that will be returned.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
_lowerCAmelCase = prime_factorization(snake_case_ )
_lowerCAmelCase = prime_factorization(snake_case_ )
elif numbera == 1 or numbera == 1:
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = max(snake_case_ , snake_case_ )
_lowerCAmelCase = 0
_lowerCAmelCase = 0
    _lowerCAmelCase = [] # numbers captured in both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
_lowerCAmelCase = prime_fac_a.count(snake_case_ )
_lowerCAmelCase = prime_fac_a.count(snake_case_ )
for _ in range(max(snake_case_ , snake_case_ ) ):
ans *= n
else:
_lowerCAmelCase = prime_fac_a.count(snake_case_ )
for _ in range(snake_case_ ):
ans *= n
done.append(snake_case_ )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
_lowerCAmelCase = prime_fac_a.count(snake_case_ )
for _ in range(snake_case_ ):
ans *= n
done.append(snake_case_ )
# precondition
assert isinstance(snake_case_ , snake_case_ ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def __UpperCAmelCase ( snake_case_ : Union[str, Any] ) -> Dict:
"""simple docstring"""
assert isinstance(snake_case_ , snake_case_ ) and (n >= 0), "'number' must been a positive int"
_lowerCAmelCase = 0
_lowerCAmelCase = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
        # if 'ans' is not prime,
        # advance to the next prime number.
while not is_prime(snake_case_ ):
ans += 1
# precondition
assert isinstance(snake_case_ , snake_case_ ) and is_prime(
snake_case_ ), "'ans' must been a prime number and from type int"
return ans
def __UpperCAmelCase ( snake_case_ : Optional[Any] , snake_case_ : int ) -> Any:
"""simple docstring"""
assert (
is_prime(snake_case_ ) and is_prime(snake_case_ ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
_lowerCAmelCase = p_number_a + 1 # jump to the next number
    _lowerCAmelCase = [] # this list will be returned.
# if number is not prime then
# fetch the next prime number.
while not is_prime(snake_case_ ):
number += 1
while number < p_number_a:
ans.append(snake_case_ )
number += 1
# fetch the next prime number.
while not is_prime(snake_case_ ):
number += 1
# precondition
assert (
isinstance(snake_case_ , snake_case_ )
and ans[0] != p_number_a
and ans[len(snake_case_ ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
    # 'ans' contains neither 'pNumber1' nor 'pNumber2'!
return ans
def __UpperCAmelCase ( snake_case_ : int ) -> Dict:
"""simple docstring"""
assert isinstance(snake_case_ , snake_case_ ) and (n >= 1), "'n' must been int and >= 1"
_lowerCAmelCase = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(snake_case_ )
# precondition
assert ans[0] == 1 and ans[len(snake_case_ ) - 1] == n, "Error in function getDivisiors(...)"
return ans
def __UpperCAmelCase ( snake_case_ : List[Any] ) -> Optional[Any]:
"""simple docstring"""
assert isinstance(snake_case_ , snake_case_ ) and (
number > 1
), "'number' must been an int and >= 1"
_lowerCAmelCase = get_divisors(snake_case_ )
# precondition
assert (
isinstance(snake_case_ , snake_case_ )
and (divisors[0] == 1)
and (divisors[len(snake_case_ ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def __UpperCAmelCase ( snake_case_ : Dict , snake_case_ : Dict ) -> List[Any]:
"""simple docstring"""
assert (
isinstance(snake_case_ , snake_case_ )
and isinstance(snake_case_ , snake_case_ )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
_lowerCAmelCase = gcd(abs(snake_case_ ) , abs(snake_case_ ) )
# precondition
assert (
isinstance(snake_case_ , snake_case_ )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def __UpperCAmelCase ( snake_case_ : Optional[int] ) -> Optional[int]:
"""simple docstring"""
assert isinstance(snake_case_ , snake_case_ ) and (n >= 0), "'n' must been a int and >= 0"
    _lowerCAmelCase = 1 # this will be returned.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def __UpperCAmelCase ( snake_case_ : Optional[int] ) -> List[Any]:
"""simple docstring"""
assert isinstance(snake_case_ , snake_case_ ) and (n >= 0), "'n' must been an int and >= 0"
_lowerCAmelCase = 0
_lowerCAmelCase = 1
    _lowerCAmelCase = 1 # this will be returned
for _ in range(n - 1 ):
_lowerCAmelCase = ans
ans += fiba
_lowerCAmelCase = tmp
return ans
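# Spot check (a sketch) of the perfect-number predicate defined earlier in
# this file: 6 and 28 both equal the sum of their proper divisors.
for n in (6, 28):
    assert sum(d for d in range(1, n) if n % d == 0) == n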
| 317
|
"""simple docstring"""
from __future__ import annotations
class __lowerCamelCase :
def __init__(self , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase = text, pattern
_lowerCAmelCase , _lowerCAmelCase = len(lowerCamelCase ), len(lowerCamelCase )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def A__ (self , lowerCamelCase ):
'''simple docstring'''
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = []
for i in range(self.textLen - self.patLen + 1 ):
_lowerCAmelCase = self.mismatch_in_text(lowerCamelCase )
if mismatch_index == -1:
positions.append(lowerCamelCase )
else:
_lowerCAmelCase = self.match_in_pattern(self.text[mismatch_index] )
_lowerCAmelCase = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
SCREAMING_SNAKE_CASE : Any = '''ABAABA'''
SCREAMING_SNAKE_CASE : Optional[int] = '''AB'''
SCREAMING_SNAKE_CASE : str = BoyerMooreSearch(text, pattern)
SCREAMING_SNAKE_CASE : Tuple = bms.bad_character_heuristic()
if len(positions) == 0:
print('''No match found''')
else:
print('''Pattern found in following positions: ''')
print(positions)
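# Sanity check (a sketch) of the matcher above on the driver inputs: a naive
# scan agrees that "AB" occurs in "ABAABA" at indices 0 and 3.
t, p = "ABAABA", "AB"
naive = [i for i in range(len(t) - len(p) + 1) if t[i : i + len(p)] == p]
assert naive == [0, 3]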
| 317
| 1
|
"""simple docstring"""
import math
from datetime import datetime, timedelta
def __UpperCAmelCase ( snake_case_ : int ) -> datetime:
"""simple docstring"""
_lowerCAmelCase = year % 19
_lowerCAmelCase = year % 4
_lowerCAmelCase = year % 7
_lowerCAmelCase = math.floor(year / 100 )
_lowerCAmelCase = math.floor((13 + 8 * leap_day_inhibits) / 25 )
_lowerCAmelCase = leap_day_inhibits / 4
_lowerCAmelCase = (
15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 30
_lowerCAmelCase = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
# days to be added to March 21
_lowerCAmelCase = (19 * metonic_cycle + secular_moon_shift) % 30
# PHM -> Paschal Full Moon
_lowerCAmelCase = (
2 * julian_leap_year
+ 4 * non_leap_year
+ 6 * days_to_add
+ century_starting_point
) % 7
if days_to_add == 29 and days_from_phm_to_sunday == 6:
return datetime(snake_case_ , 4 , 19 )
elif days_to_add == 28 and days_from_phm_to_sunday == 6:
return datetime(snake_case_ , 4 , 18 )
else:
return datetime(snake_case_ , 3 , 22 ) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (1_9_9_4, 2_0_0_0, 2_0_1_0, 2_0_2_1, 2_0_2_3):
SCREAMING_SNAKE_CASE : Dict = '''will be''' if year > datetime.now().year else '''was'''
print(F'Easter in {year} {tense} {gauss_easter(year)}')
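# Spot checks (hedged): Gauss's method puts Easter Sunday on 23 April 2000 and
# 9 April 2023; the anonymized function above (called `gauss_easter` in the
# driver loop) should reproduce both:
#
#   gauss_easter(2000) == datetime(2000, 4, 23)
#   gauss_easter(2023) == datetime(2023, 4, 9)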
| 317
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
SCREAMING_SNAKE_CASE : List[str] = False
class __lowerCamelCase ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = VersatileDiffusionImageVariationPipeline.from_pretrained("""shi-labs/versatile-diffusion""" )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
_lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
_lowerCAmelCase = torch.manual_seed(0 )
_lowerCAmelCase = pipe(
image=lowerCamelCase , generator=lowerCamelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
_lowerCAmelCase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 317
| 1
|
"""simple docstring"""
def __UpperCAmelCase ( snake_case_ : list ) -> list:
"""simple docstring"""
for i in range(len(snake_case_ ) - 1 , 0 , -1 ):
_lowerCAmelCase = False
for j in range(snake_case_ , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
_lowerCAmelCase , _lowerCAmelCase = unsorted[j - 1], unsorted[j]
_lowerCAmelCase = True
for j in range(snake_case_ ):
if unsorted[j] > unsorted[j + 1]:
_lowerCAmelCase , _lowerCAmelCase = unsorted[j + 1], unsorted[j]
_lowerCAmelCase = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE : List[Any] = input('''Enter numbers separated by a comma:\n''').strip()
SCREAMING_SNAKE_CASE : List[str] = [int(item) for item in user_input.split(''',''')]
print(F'{cocktail_shaker_sort(unsorted) = }')
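# Worked example (a sketch, using the `cocktail_shaker_sort` name from the
# driver line above): each outer pass sweeps right-to-left then left-to-right,
# so both the largest and smallest unsorted values settle in a single pass.
#
#   cocktail_shaker_sort([4, 5, 2, 1, 3])  ->  [1, 2, 3, 4, 5]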
| 317
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __lowerCamelCase ( __lowercase , unittest.TestCase ):
__UpperCamelCase = DiTPipeline
__UpperCamelCase = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
__UpperCamelCase = PipelineTesterMixin.required_optional_params - {
'latents',
'num_images_per_prompt',
'callback',
'callback_steps',
}
__UpperCamelCase = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
__UpperCamelCase = False
def A__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=lowerCamelCase , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=1_000 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=lowerCamelCase , )
_lowerCAmelCase = AutoencoderKL()
_lowerCAmelCase = DDIMScheduler()
_lowerCAmelCase = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler}
return components
def A__ (self , lowerCamelCase , lowerCamelCase=0 ):
'''simple docstring'''
if str(lowerCamelCase ).startswith("""mps""" ):
_lowerCAmelCase = torch.manual_seed(lowerCamelCase )
else:
_lowerCAmelCase = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
_lowerCAmelCase = {
"""class_labels""": [1],
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = """cpu"""
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = self.pipeline_class(**lowerCamelCase )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
_lowerCAmelCase = self.get_dummy_inputs(lowerCamelCase )
_lowerCAmelCase = pipe(**lowerCamelCase ).images
_lowerCAmelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
_lowerCAmelCase = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
_lowerCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCamelCase , 1e-3 )
def A__ (self ):
'''simple docstring'''
self._test_inference_batch_single_identical(relax_max_difference=lowerCamelCase , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def A__ (self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class __lowerCamelCase ( unittest.TestCase ):
def A__ (self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = torch.manual_seed(0 )
_lowerCAmelCase = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" )
pipe.to("""cuda""" )
_lowerCAmelCase = ["""vase""", """umbrella""", """white shark""", """white wolf"""]
_lowerCAmelCase = pipe.get_label_ids(lowerCamelCase )
_lowerCAmelCase = pipe(lowerCamelCase , generator=lowerCamelCase , num_inference_steps=40 , output_type="""np""" ).images
for word, image in zip(lowerCamelCase , lowerCamelCase ):
_lowerCAmelCase = load_numpy(
f"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" )
assert np.abs((expected_image - image).max() ) < 1e-2
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" )
_lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("""cuda""" )
_lowerCAmelCase = ["""vase""", """umbrella"""]
_lowerCAmelCase = pipe.get_label_ids(lowerCamelCase )
_lowerCAmelCase = torch.manual_seed(0 )
_lowerCAmelCase = pipe(lowerCamelCase , generator=lowerCamelCase , num_inference_steps=25 , output_type="""np""" ).images
for word, image in zip(lowerCamelCase , lowerCamelCase ):
_lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
f"""/dit/{word}_512.npy""" )
assert np.abs((expected_image - image).max() ) < 1e-1
| 317
| 1
|
"""simple docstring"""
from __future__ import annotations
def __UpperCAmelCase ( snake_case_ : list[float] ) -> float:
"""simple docstring"""
_lowerCAmelCase = 0.0_0
_lowerCAmelCase = 0
for resistor in resistors:
if resistor <= 0:
_lowerCAmelCase = F"""Resistor at index {index} has a negative or zero value!"""
raise ValueError(snake_case_ )
first_sum += 1 / float(snake_case_ )
index += 1
return 1 / first_sum
def __UpperCAmelCase ( snake_case_ : list[float] ) -> float:
"""simple docstring"""
_lowerCAmelCase = 0.0_0
_lowerCAmelCase = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
_lowerCAmelCase = F"""Resistor at index {index} has a negative value!"""
raise ValueError(snake_case_ )
index += 1
return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
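# Worked example (a sketch) of the two formulas above: two 4-ohm resistors in
# parallel give 1 / (1/4 + 1/4) = 2 ohms; in series they simply add to 8 ohms.
assert 1 / (1 / 4.0 + 1 / 4.0) == 2.0
assert 4.0 + 4.0 == 8.0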
| 317
|
"""simple docstring"""
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def __UpperCAmelCase ( snake_case_ : Union[str, Any] ) -> Dict:
"""simple docstring"""
return getitem, k
def __UpperCAmelCase ( snake_case_ : Dict , snake_case_ : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return setitem, k, v
def __UpperCAmelCase ( snake_case_ : str ) -> Optional[int]:
"""simple docstring"""
return delitem, k
def __UpperCAmelCase ( snake_case_ : Optional[Any] , snake_case_ : Tuple , *snake_case_ : Tuple ) -> str:
"""simple docstring"""
try:
return fun(snake_case_ , *snake_case_ ), None
except Exception as e:
return None, e
SCREAMING_SNAKE_CASE : int = (
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
)
SCREAMING_SNAKE_CASE : List[Any] = [
_set('''key_a''', '''val_a'''),
_set('''key_a''', '''val_b'''),
]
SCREAMING_SNAKE_CASE : Any = [
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
_del('''key_a'''),
_del('''key_b'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
]
SCREAMING_SNAKE_CASE : Union[str, Any] = [
_get('''key_a'''),
_del('''key_a'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
_del('''key_a'''),
_get('''key_a'''),
]
SCREAMING_SNAKE_CASE : Optional[Any] = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
SCREAMING_SNAKE_CASE : Optional[int] = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('''key_a''', '''val_b'''),
]
@pytest.mark.parametrize(
"""operations""" , (
pytest.param(_add_items , id="""add items""" ),
pytest.param(_overwrite_items , id="""overwrite items""" ),
pytest.param(_delete_items , id="""delete items""" ),
pytest.param(_access_absent_items , id="""access absent items""" ),
pytest.param(_add_with_resize_up , id="""add with resize up""" ),
pytest.param(_add_with_resize_down , id="""add with resize down""" ),
) , )
def __UpperCAmelCase ( snake_case_ : List[Any] ) -> Tuple:
"""simple docstring"""
_lowerCAmelCase = HashMap(initial_block_size=4 )
_lowerCAmelCase = {}
for _, (fun, *args) in enumerate(snake_case_ ):
_lowerCAmelCase , _lowerCAmelCase = _run_operation(snake_case_ , snake_case_ , *snake_case_ )
_lowerCAmelCase , _lowerCAmelCase = _run_operation(snake_case_ , snake_case_ , *snake_case_ )
assert my_res == py_res
assert str(snake_case_ ) == str(snake_case_ )
assert set(snake_case_ ) == set(snake_case_ )
assert len(snake_case_ ) == len(snake_case_ )
assert set(my.items() ) == set(py.items() )
def __UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
def is_public(snake_case_ : str ) -> bool:
return not name.startswith("""_""" )
_lowerCAmelCase = {name for name in dir({} ) if is_public(snake_case_ )}
_lowerCAmelCase = {name for name in dir(HashMap() ) if is_public(snake_case_ )}
assert dict_public_names > hash_public_names
| 317
| 1
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class __lowerCamelCase ( unittest.TestCase ):
__UpperCamelCase = StableDiffusionLDMaDPipeline
__UpperCamelCase = TEXT_TO_IMAGE_PARAMS
__UpperCamelCase = TEXT_TO_IMAGE_BATCH_PARAMS
__UpperCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
def A__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
_lowerCAmelCase = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=lowerCamelCase , set_alpha_to_one=lowerCamelCase , )
torch.manual_seed(0 )
_lowerCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
_lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
_lowerCAmelCase = CLIPTextModel(lowerCamelCase )
_lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_lowerCAmelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def A__ (self , lowerCamelCase , lowerCamelCase=0 ):
'''simple docstring'''
if str(lowerCamelCase ).startswith("""mps""" ):
_lowerCAmelCase = torch.manual_seed(lowerCamelCase )
else:
_lowerCAmelCase = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
_lowerCAmelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = StableDiffusionLDMaDPipeline(**lowerCamelCase )
_lowerCAmelCase = ldmad_pipe.to(lowerCamelCase )
ldmad_pipe.set_progress_bar_config(disable=lowerCamelCase )
_lowerCAmelCase = self.get_dummy_inputs(lowerCamelCase )
_lowerCAmelCase = ldmad_pipe(**lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase = output.rgb, output.depth
_lowerCAmelCase = rgb[0, -3:, -3:, -1]
_lowerCAmelCase = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
_lowerCAmelCase = np.array(
[0.3733_8176, 0.7_0247, 0.7420_3193, 0.5164_3604, 0.5825_6793, 0.6093_2136, 0.418_1095, 0.4835_5877, 0.4653_5262] )
_lowerCAmelCase = np.array([103.4_6727, 85.81_2004, 87.84_9236] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1e-2
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = StableDiffusionLDMaDPipeline(**lowerCamelCase )
_lowerCAmelCase = ldmad_pipe.to(lowerCamelCase )
ldmad_pipe.set_progress_bar_config(disable=lowerCamelCase )
_lowerCAmelCase = self.get_dummy_inputs(lowerCamelCase )
_lowerCAmelCase = 3 * [inputs["""prompt"""]]
# forward
_lowerCAmelCase = ldmad_pipe(**lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase = output.rgb, output.depth
_lowerCAmelCase = rgb_slice_a[0, -3:, -3:, -1]
_lowerCAmelCase = depth_slice_a[0, -3:, -1]
_lowerCAmelCase = self.get_dummy_inputs(lowerCamelCase )
_lowerCAmelCase = 3 * [inputs.pop("""prompt""" )]
_lowerCAmelCase = ldmad_pipe.tokenizer(
lowerCamelCase , padding="""max_length""" , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=lowerCamelCase , return_tensors="""pt""" , )
_lowerCAmelCase = text_inputs["""input_ids"""].to(lowerCamelCase )
_lowerCAmelCase = ldmad_pipe.text_encoder(lowerCamelCase )[0]
_lowerCAmelCase = prompt_embeds
# forward
_lowerCAmelCase = ldmad_pipe(**lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase = output.rgb, output.depth
_lowerCAmelCase = rgb_slice_a[0, -3:, -3:, -1]
_lowerCAmelCase = depth_slice_a[0, -3:, -1]
assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1e-4
assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1e-4
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = PNDMScheduler(skip_prk_steps=lowerCamelCase )
_lowerCAmelCase = StableDiffusionLDMaDPipeline(**lowerCamelCase )
_lowerCAmelCase = ldmad_pipe.to(lowerCamelCase )
ldmad_pipe.set_progress_bar_config(disable=lowerCamelCase )
_lowerCAmelCase = self.get_dummy_inputs(lowerCamelCase )
_lowerCAmelCase = """french fries"""
_lowerCAmelCase = ldmad_pipe(**lowerCamelCase , negative_prompt=lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase = output.rgb, output.depth
_lowerCAmelCase = rgb[0, -3:, -3:, -1]
_lowerCAmelCase = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
_lowerCAmelCase = np.array(
[0.3_7044, 0.7181_1503, 0.722_3251, 0.4860_3675, 0.563_8391, 0.636_4948, 0.4283_3704, 0.490_1315, 0.4792_6217] )
_lowerCAmelCase = np.array([107.8_4738, 84.6_2802, 89.96_2135] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1e-2
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
def A__ (self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ (self , lowerCamelCase , lowerCamelCase="cpu" , lowerCamelCase=torch.floataa , lowerCamelCase=0 ):
'''simple docstring'''
_lowerCAmelCase = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
_lowerCAmelCase = np.random.RandomState(lowerCamelCase ).standard_normal((1, 4, 64, 64) )
_lowerCAmelCase = torch.from_numpy(lowerCamelCase ).to(device=lowerCamelCase , dtype=lowerCamelCase )
_lowerCAmelCase = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d""" )
_lowerCAmelCase = ldmad_pipe.to(lowerCamelCase )
ldmad_pipe.set_progress_bar_config(disable=lowerCamelCase )
_lowerCAmelCase = self.get_inputs(lowerCamelCase )
_lowerCAmelCase = ldmad_pipe(**lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase = output.rgb, output.depth
_lowerCAmelCase = rgb[0, -3:, -3:, -1].flatten()
_lowerCAmelCase = depth[0, -3:, -1].flatten()
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512)
_lowerCAmelCase = np.array(
[0.5380_5465, 0.5670_7305, 0.548_6515, 0.5701_2236, 0.581_4511, 0.5625_3487, 0.5484_3014, 0.5509_2263, 0.645_9706] )
_lowerCAmelCase = np.array(
[0.926_3781, 0.667_8672, 0.548_6515, 0.9220_2145, 0.6783_1135, 0.5625_3487, 0.924_1694, 0.755_1478, 0.645_9706] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3e-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3e-3
@nightly
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
def A__ (self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ (self , lowerCamelCase , lowerCamelCase="cpu" , lowerCamelCase=torch.floataa , lowerCamelCase=0 ):
'''simple docstring'''
_lowerCAmelCase = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
_lowerCAmelCase = np.random.RandomState(lowerCamelCase ).standard_normal((1, 4, 64, 64) )
_lowerCAmelCase = torch.from_numpy(lowerCamelCase ).to(device=lowerCamelCase , dtype=lowerCamelCase )
_lowerCAmelCase = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 50,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d""" ).to(lowerCamelCase )
ldmad_pipe.set_progress_bar_config(disable=lowerCamelCase )
_lowerCAmelCase = self.get_inputs(lowerCamelCase )
_lowerCAmelCase = ldmad_pipe(**lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase = output.rgb, output.depth
_lowerCAmelCase = 0.49_5586
_lowerCAmelCase = 0.3379_5515
_lowerCAmelCase = 112.4_8518
_lowerCAmelCase = 98.48_9746
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d-4c""" ).to(lowerCamelCase )
ldmad_pipe.set_progress_bar_config(disable=lowerCamelCase )
_lowerCAmelCase = self.get_inputs(lowerCamelCase )
_lowerCAmelCase = ldmad_pipe(**lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase = output.rgb, output.depth
_lowerCAmelCase = 0.419_4127
_lowerCAmelCase = 0.3537_5586
_lowerCAmelCase = 0.563_8502
_lowerCAmelCase = 0.3468_6103
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
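# Hedged usage sketch of the pipeline exercised by the tests above. The
# checkpoint ids ("Intel/ldm3d", "Intel/ldm3d-4c") appear in the tests; the
# prompt and device below are illustrative, not part of the test suite:
#
#   pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d-4c").to("cuda")
#   output = pipe("a photograph of an astronaut riding a horse", num_inference_steps=50)
#   rgb, depth = output.rgb, output.depth  # aligned RGB image and depth map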
| 317
|
"""simple docstring"""
def __UpperCAmelCase ( snake_case_ : int , snake_case_ : list[int] , snake_case_ : int ) -> int:
"""simple docstring"""
def count_of_possible_combinations(snake_case_ : int ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(snake_case_ )
def __UpperCAmelCase ( snake_case_ : int , snake_case_ : list[int] , snake_case_ : int ) -> int:
"""simple docstring"""
def count_of_possible_combinations_with_dp_array(
snake_case_ : int , snake_case_ : list[int] ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
_lowerCAmelCase = sum(
count_of_possible_combinations_with_dp_array(target - item , snake_case_ )
for item in array )
_lowerCAmelCase = answer
return answer
_lowerCAmelCase = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(snake_case_ , snake_case_ )
def __UpperCAmelCase ( snake_case_ : int , snake_case_ : list[int] , snake_case_ : int ) -> int:
"""simple docstring"""
_lowerCAmelCase = [0] * (target + 1)
_lowerCAmelCase = 1
for i in range(1 , target + 1 ):
for j in range(snake_case_ ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE : Tuple = 3
SCREAMING_SNAKE_CASE : Any = 5
SCREAMING_SNAKE_CASE : Optional[int] = [1, 2, 5]
print(combination_sum_iv(n, array, target))
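# Hedged sanity check for the variants above (the public names are taken from
# the __main__ call; the arguments are n=len(array), array, target). With
# array=[1, 2, 5] and target=5 the recurrence gives
#
#   f(0)=1, f(1)=1, f(2)=2, f(3)=3, f(4)=5, f(5)=f(4)+f(3)+f(0)=9
#
# so the call above should print 9:
#
#   >>> combination_sum_iv(3, [1, 2, 5], 5)
#   9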
| 317
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE : List[Any] = {
'''configuration_roformer''': ['''ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoFormerConfig''', '''RoFormerOnnxConfig'''],
'''tokenization_roformer''': ['''RoFormerTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Optional[int] = ['''RoFormerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Tuple = [
'''ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoFormerForCausalLM''',
'''RoFormerForMaskedLM''',
'''RoFormerForMultipleChoice''',
'''RoFormerForQuestionAnswering''',
'''RoFormerForSequenceClassification''',
'''RoFormerForTokenClassification''',
'''RoFormerLayer''',
'''RoFormerModel''',
'''RoFormerPreTrainedModel''',
'''load_tf_weights_in_roformer''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Dict = [
'''TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRoFormerForCausalLM''',
'''TFRoFormerForMaskedLM''',
'''TFRoFormerForMultipleChoice''',
'''TFRoFormerForQuestionAnswering''',
'''TFRoFormerForSequenceClassification''',
'''TFRoFormerForTokenClassification''',
'''TFRoFormerLayer''',
'''TFRoFormerModel''',
'''TFRoFormerPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : List[str] = [
'''FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxRoFormerForMaskedLM''',
'''FlaxRoFormerForMultipleChoice''',
'''FlaxRoFormerForQuestionAnswering''',
'''FlaxRoFormerForSequenceClassification''',
'''FlaxRoFormerForTokenClassification''',
'''FlaxRoFormerModel''',
'''FlaxRoFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
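# Hedged note on the pattern above: _LazyModule defers the framework-specific
# imports until a symbol is first accessed, so (sketch)
#
#   from transformers import RoFormerModel
#
# only triggers the heavy torch-backed module import at the moment
# RoFormerModel is actually resolved, keeping a bare `import transformers` cheap.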
| 317
|
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
SCREAMING_SNAKE_CASE : str = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
SCREAMING_SNAKE_CASE : list[int] = [ord(letter) for letter in string.ascii_lowercase]
SCREAMING_SNAKE_CASE : set[int] = {ord(char) for char in VALID_CHARS}
SCREAMING_SNAKE_CASE : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def __UpperCAmelCase ( snake_case_ : list[int] , snake_case_ : tuple[int, ...] ) -> str | None:
"""simple docstring"""
_lowerCAmelCase = ""
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = 42
for keychar, cipherchar in zip(cycle(snake_case_ ) , snake_case_ ):
_lowerCAmelCase = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(snake_case_ )
return decoded
def __UpperCAmelCase ( snake_case_ : list[int] ) -> list[str]:
"""simple docstring"""
_lowerCAmelCase = []
for key in product(snake_case_ , repeat=3 ):
_lowerCAmelCase = try_key(snake_case_ , snake_case_ )
if encoded is not None:
possibles.append(snake_case_ )
return possibles
def __UpperCAmelCase ( snake_case_ : list[str] , snake_case_ : str ) -> list[str]:
"""simple docstring"""
return [possible for possible in possibles if common_word in possible.lower()]
def __UpperCAmelCase ( snake_case_ : str = "p059_cipher.txt" ) -> int:
"""simple docstring"""
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = Path(snake_case_ ).parent.joinpath(snake_case_ ).read_text(encoding="""utf-8""" )
_lowerCAmelCase = [int(snake_case_ ) for number in data.strip().split(""",""" )]
_lowerCAmelCase = filter_valid_chars(snake_case_ )
for common_word in COMMON_WORDS:
_lowerCAmelCase = filter_common_word(snake_case_ , snake_case_ )
if len(snake_case_ ) == 1:
break
_lowerCAmelCase = possibles[0]
return sum(ord(snake_case_ ) for char in decoded_text )
if __name__ == "__main__":
print(F'{solution() = }')
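# Hedged round-trip check for try_key, independent of the Project Euler data
# file (a standalone sketch; every character of the plaintext is in
# VALID_CHARS, so decoding succeeds):
#
#   >>> key = tuple(ord(c) for c in "abc")
#   >>> cipher = [ord(p) ^ k for p, k in zip("the quick", cycle(key))]
#   >>> try_key(cipher, key)
#   'the quick'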
| 317
| 1
|
"""simple docstring"""
import math
def __UpperCAmelCase ( snake_case_ : int ) -> list[int]:
"""simple docstring"""
_lowerCAmelCase = []
_lowerCAmelCase = 2
_lowerCAmelCase = int(math.sqrt(snake_case_ ) ) # Size of every segment
_lowerCAmelCase = [True] * (end + 1)
_lowerCAmelCase = []
while start <= end:
if temp[start] is True:
in_prime.append(snake_case_ )
for i in range(start * start , end + 1 , snake_case_ ):
_lowerCAmelCase = False
start += 1
prime += in_prime
_lowerCAmelCase = end + 1
_lowerCAmelCase = min(2 * end , snake_case_ )
while low <= n:
_lowerCAmelCase = [True] * (high - low + 1)
for each in in_prime:
_lowerCAmelCase = math.floor(low / each ) * each
if t < low:
t += each
for j in range(snake_case_ , high + 1 , snake_case_ ):
_lowerCAmelCase = False
for j in range(len(snake_case_ ) ):
if temp[j] is True:
prime.append(j + low )
_lowerCAmelCase = high + 1
_lowerCAmelCase = min(high + end , snake_case_ )
return prime
print(sieve(1_0**6))
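# Hedged sanity check (``sieve`` is the name used in the call above); the
# segmented passes should agree with a plain Sieve of Eratosthenes:
#
#   >>> sieve(30)
#   [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]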
| 317
|
"""simple docstring"""
def __UpperCAmelCase ( snake_case_ : int = 1000000 ) -> int:
"""simple docstring"""
_lowerCAmelCase = limit + 1
_lowerCAmelCase = [0] * limit
for first_term in range(1 , snake_case_ ):
for n in range(snake_case_ , snake_case_ , snake_case_ ):
_lowerCAmelCase = first_term + n / first_term
if common_difference % 4: # d must be divisible by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x, y, z are positive integers
frequency[n] += 1 # so z > 0 and a > d, and also a < 4*d
_lowerCAmelCase = sum(1 for x in frequency[1:limit] if x == 10 )
return count
if __name__ == "__main__":
print(F'{solution() = }')
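# Derivation behind the loop above (added for clarity): write the arithmetic
# progression as x = a + d, y = a, z = a - d. Then
#
#   n = x**2 - y**2 - z**2 = a * (4*d - a)
#
# so a + n / a = 4*d must be divisible by 4, with d < a (so that z > 0) and
# a < 4*d (so that n > 0). These are exactly the checks applied to
# ``first_term`` (a) and ``common_difference`` (d) in the solution.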
| 317
| 1
|
"""simple docstring"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''tensor(bool)''': np.bool_,
'''tensor(int8)''': np.inta,
'''tensor(uint8)''': np.uinta,
'''tensor(int16)''': np.intaa,
'''tensor(uint16)''': np.uintaa,
'''tensor(int32)''': np.intaa,
'''tensor(uint32)''': np.uintaa,
'''tensor(int64)''': np.intaa,
'''tensor(uint64)''': np.uintaa,
'''tensor(float16)''': np.floataa,
'''tensor(float)''': np.floataa,
'''tensor(double)''': np.floataa,
}
class __lowerCamelCase :
def __init__(self , lowerCamelCase=None , **lowerCamelCase ):
'''simple docstring'''
logger.info("""`diffusers.OnnxRuntimeModel` is experimental and might change in the future.""" )
_lowerCAmelCase = model
_lowerCAmelCase = kwargs.get("""model_save_dir""" , lowerCamelCase )
_lowerCAmelCase = kwargs.get("""latest_model_name""" , lowerCamelCase )
def __call__(self , **lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = {k: np.array(lowerCamelCase ) for k, v in kwargs.items()}
return self.model.run(lowerCamelCase , lowerCamelCase )
@staticmethod
def A__ (lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None ):
'''simple docstring'''
if provider is None:
logger.info("""No onnxruntime provider specified, using CPUExecutionProvider""" )
_lowerCAmelCase = """CPUExecutionProvider"""
return ort.InferenceSession(lowerCamelCase , providers=[provider] , sess_options=lowerCamelCase )
def A__ (self , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = file_name if file_name is not None else ONNX_WEIGHTS_NAME
_lowerCAmelCase = self.model_save_dir.joinpath(self.latest_model_name )
_lowerCAmelCase = Path(lowerCamelCase ).joinpath(lowerCamelCase )
try:
shutil.copyfile(lowerCamelCase , lowerCamelCase )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
_lowerCAmelCase = self.model_save_dir.joinpath(lowerCamelCase )
if src_path.exists():
_lowerCAmelCase = Path(lowerCamelCase ).joinpath(lowerCamelCase )
try:
shutil.copyfile(lowerCamelCase , lowerCamelCase )
except shutil.SameFileError:
pass
def A__ (self , lowerCamelCase , **lowerCamelCase , ):
'''simple docstring'''
if os.path.isfile(lowerCamelCase ):
logger.error(f"""Provided path ({save_directory}) should be a directory, not a file""" )
return
os.makedirs(lowerCamelCase , exist_ok=lowerCamelCase )
# saving model weights/files
self._save_pretrained(lowerCamelCase , **lowerCamelCase )
@classmethod
def A__ (cls , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = False , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , **lowerCamelCase , ):
'''simple docstring'''
_lowerCAmelCase = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(lowerCamelCase ):
_lowerCAmelCase = OnnxRuntimeModel.load_model(
os.path.join(lowerCamelCase , lowerCamelCase ) , provider=lowerCamelCase , sess_options=lowerCamelCase )
_lowerCAmelCase = Path(lowerCamelCase )
# load model from hub
else:
# download model
_lowerCAmelCase = hf_hub_download(
repo_id=lowerCamelCase , filename=lowerCamelCase , use_auth_token=lowerCamelCase , revision=lowerCamelCase , cache_dir=lowerCamelCase , force_download=lowerCamelCase , )
_lowerCAmelCase = Path(lowerCamelCase ).parent
_lowerCAmelCase = Path(lowerCamelCase ).name
_lowerCAmelCase = OnnxRuntimeModel.load_model(lowerCamelCase , provider=lowerCamelCase , sess_options=lowerCamelCase )
return cls(model=lowerCamelCase , **lowerCamelCase )
@classmethod
def A__ (cls , lowerCamelCase , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = None , **lowerCamelCase , ):
'''simple docstring'''
_lowerCAmelCase = None
if len(str(lowerCamelCase ).split("""@""" ) ) == 2:
_lowerCAmelCase , _lowerCAmelCase = model_id.split("""@""" )
return cls._from_pretrained(
model_id=lowerCamelCase , revision=lowerCamelCase , cache_dir=lowerCamelCase , force_download=lowerCamelCase , use_auth_token=lowerCamelCase , **lowerCamelCase , )
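# Hedged usage sketch (upstream this wrapper is diffusers.OnnxRuntimeModel;
# the repo id and the input names below are placeholders, not the real
# arguments of any particular checkpoint):
#
#   model = OnnxRuntimeModel.from_pretrained("some-org/some-onnx-repo")
#   outputs = model(sample=latents, timestep=np.array([1.0]))
#   # __call__ coerces every kwarg to a numpy array and feeds it to the
#   # onnxruntime InferenceSession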
| 317
|
"""simple docstring"""
from functools import reduce
SCREAMING_SNAKE_CASE : int = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def __UpperCAmelCase ( snake_case_ : str = N ) -> int:
"""simple docstring"""
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda snake_case_ , snake_case_ : str(int(snake_case_ ) * int(snake_case_ ) ) , n[i : i + 13] ) )
for i in range(len(snake_case_ ) - 12 ) )
if __name__ == "__main__":
print(F'{solution() = }')
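# A plainer equivalent of the reduce() above for a single 13-character window
# (sketch; the str()/int() round-trip in the original exists only to satisfy
# mypy):
#
#   def window_product(window: str) -> int:
#       product = 1
#       for digit in window:
#           product *= int(digit)
#       return product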
| 317
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Optional[int] = {
'''nielsr/canine-s''': 2_0_4_8,
}
# Unicode defines 1,114,112 total "codepoints"
SCREAMING_SNAKE_CASE : Tuple = 1_1_1_4_1_1_2
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
SCREAMING_SNAKE_CASE : str = 0
SCREAMING_SNAKE_CASE : str = 0XE000
SCREAMING_SNAKE_CASE : Any = 0XE001
SCREAMING_SNAKE_CASE : Optional[int] = 0XE002
SCREAMING_SNAKE_CASE : List[Any] = 0XE003
SCREAMING_SNAKE_CASE : Union[str, Any] = 0XE004
# Maps special codepoints to human-readable names.
SCREAMING_SNAKE_CASE : Dict[int, str] = {
# Special symbols are represented using codepoint values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SCREAMING_SNAKE_CASE : Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__(self , lowerCamelCase=chr(lowerCamelCase ) , lowerCamelCase=chr(lowerCamelCase ) , lowerCamelCase=chr(lowerCamelCase ) , lowerCamelCase=chr(lowerCamelCase ) , lowerCamelCase=chr(lowerCamelCase ) , lowerCamelCase=chr(lowerCamelCase ) , lowerCamelCase=False , lowerCamelCase=2_048 , **lowerCamelCase , ):
'''simple docstring'''
_lowerCAmelCase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else bos_token
_lowerCAmelCase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else eos_token
_lowerCAmelCase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else sep_token
_lowerCAmelCase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else cls_token
_lowerCAmelCase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
_lowerCAmelCase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else mask_token
super().__init__(
bos_token=lowerCamelCase , eos_token=lowerCamelCase , sep_token=lowerCamelCase , cls_token=lowerCamelCase , pad_token=lowerCamelCase , mask_token=lowerCamelCase , add_prefix_space=lowerCamelCase , model_max_length=lowerCamelCase , **lowerCamelCase , )
# Creates a mapping for looking up the IDs of special symbols.
_lowerCAmelCase = {}
for codepoint, name in SPECIAL_CODEPOINTS.items():
_lowerCAmelCase = codepoint
# Creates a mapping for looking up the string forms of special symbol IDs.
_lowerCAmelCase = {
codepoint: name for name, codepoint in self._special_codepoints.items()
}
_lowerCAmelCase = UNICODE_VOCAB_SIZE
_lowerCAmelCase = len(self._special_codepoints )
@property
def A__ (self ):
'''simple docstring'''
return self._unicode_vocab_size
def A__ (self , lowerCamelCase ):
'''simple docstring'''
return list(lowerCamelCase )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
try:
return ord(lowerCamelCase )
except TypeError:
raise ValueError(f"""invalid token: '{token}'""" )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
try:
if index in SPECIAL_CODEPOINTS:
return SPECIAL_CODEPOINTS[index]
return chr(lowerCamelCase )
except TypeError:
raise ValueError(f"""invalid id: {index}""" )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
return "".join(lowerCamelCase )
def A__ (self , lowerCamelCase , lowerCamelCase = None ):
'''simple docstring'''
_lowerCAmelCase = [self.sep_token_id]
_lowerCAmelCase = [self.cls_token_id]
_lowerCAmelCase = cls + token_ids_a + sep
if token_ids_a is not None:
result += token_ids_a + sep
return result
def A__ (self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase , token_ids_a=lowerCamelCase , already_has_special_tokens=lowerCamelCase )
_lowerCAmelCase = [1] + ([0] * len(lowerCamelCase )) + [1]
if token_ids_a is not None:
result += ([0] * len(lowerCamelCase )) + [1]
return result
def A__ (self , lowerCamelCase , lowerCamelCase = None ):
'''simple docstring'''
_lowerCAmelCase = [self.sep_token_id]
_lowerCAmelCase = [self.cls_token_id]
_lowerCAmelCase = len(cls + token_ids_a + sep ) * [0]
if token_ids_a is not None:
result += len(token_ids_a + sep ) * [1]
return result
def A__ (self , lowerCamelCase , lowerCamelCase = None ):
'''simple docstring'''
return ()
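# Hedged usage sketch (upstream this class is CanineTokenizer; the ids follow
# the constants above, assuming CLS = 0xE000 and SEP = 0xE001, plus ord("h")
# and ord("i")):
#
#   tok = CanineTokenizer()
#   tok("hi")["input_ids"]  # [57344, 104, 105, 57345], i.e. [CLS] h i [SEP]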
| 317
|
"""simple docstring"""
def __UpperCAmelCase ( snake_case_ : int = 600851475143 ) -> int:
"""simple docstring"""
try:
_lowerCAmelCase = int(snake_case_ )
except (TypeError, ValueError):
raise TypeError("""Parameter n must be int or castable to int.""" )
if n <= 0:
raise ValueError("""Parameter n must be greater than or equal to one.""" )
_lowerCAmelCase = 1
_lowerCAmelCase = 2
while i * i <= n:
while n % i == 0:
_lowerCAmelCase = i
n //= i
i += 1
if n > 1:
_lowerCAmelCase = n
return int(snake_case_ )
if __name__ == "__main__":
print(F'{solution() = }')
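# Hedged sanity check (``solution`` per the call above): 13195 factors as
# 5 * 7 * 13 * 29, so its largest prime factor is 29.
#
#   >>> solution(13195)
#   29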
| 317
| 1
|
"""simple docstring"""
from __future__ import annotations
def __UpperCAmelCase ( snake_case_ : int , snake_case_ : int ) -> list[list[int]]:
"""simple docstring"""
_lowerCAmelCase = []
create_all_state(1 , snake_case_ , snake_case_ , [] , snake_case_ )
return result
def __UpperCAmelCase ( snake_case_ : int , snake_case_ : int , snake_case_ : int , snake_case_ : list[int] , snake_case_ : list[list[int]] , ) -> None:
"""simple docstring"""
if level == 0:
total_list.append(current_list[:] )
return
for i in range(snake_case_ , total_number - level + 2 ):
current_list.append(snake_case_ )
create_all_state(i + 1 , snake_case_ , level - 1 , snake_case_ , snake_case_ )
current_list.pop()
def __UpperCAmelCase ( snake_case_ : list[list[int]] ) -> None:
"""simple docstring"""
for i in total_list:
print(*snake_case_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : List[Any] = 4
SCREAMING_SNAKE_CASE : List[Any] = 2
SCREAMING_SNAKE_CASE : Union[str, Any] = generate_all_combinations(n, k)
print_all_state(total_list)
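# Hedged note: for the __main__ values above (n=4, k=2) the printed states
# are the six 2-combinations of {1, 2, 3, 4}:
#
#   1 2 / 1 3 / 1 4 / 2 3 / 2 4 / 3 4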
| 317
|
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
SCREAMING_SNAKE_CASE : Optional[Any] = logging.getLogger(__name__)
@dataclass
class __lowerCamelCase :
__UpperCamelCase = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
__UpperCamelCase = field(
default=__lowercase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__UpperCamelCase = field(
default=__lowercase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
__UpperCamelCase = field(
default=__lowercase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
__UpperCamelCase = field(default=__lowercase , metadata={'help': 'Whether to freeze the encoder.'} )
__UpperCamelCase = field(default=__lowercase , metadata={'help': 'Whether to freeze the embeddings.'} )
@dataclass
class __lowerCamelCase :
__UpperCamelCase = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
__UpperCamelCase = field(
default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , )
__UpperCamelCase = field(
default=1_024 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__UpperCamelCase = field(
default=128 , metadata={
'help': (
'The maximum total sequence length for target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__UpperCamelCase = field(
default=142 , metadata={
'help': (
'The maximum total sequence length for validation target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded. '
'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
'during ``evaluate`` and ``predict``.'
)
} , )
__UpperCamelCase = field(
default=142 , metadata={
'help': (
'The maximum total sequence length for test target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__UpperCamelCase = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} )
__UpperCamelCase = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} )
__UpperCamelCase = field(default=-1 , metadata={'help': '# test examples. -1 means use all.'} )
__UpperCamelCase = field(default=__lowercase , metadata={'help': 'Source language id for translation.'} )
__UpperCamelCase = field(default=__lowercase , metadata={'help': 'Target language id for translation.'} )
__UpperCamelCase = field(default=__lowercase , metadata={'help': '# num_beams to use for evaluation.'} )
__UpperCamelCase = field(
default=__lowercase , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , )
def __UpperCAmelCase ( snake_case_ : Optional[int] , snake_case_ : Any , snake_case_ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
logger.info(F"""***** {split} metrics *****""" )
for key in sorted(metrics.keys() ):
logger.info(F""" {key} = {metrics[key]}""" )
save_json(snake_case_ , os.path.join(snake_case_ , F"""{split}_results.json""" ) )
def __UpperCAmelCase ( ) -> Union[str, Any]:
"""simple docstring"""
_lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = parser.parse_args_into_dataclasses()
check_output_dir(snake_case_ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("""Training/evaluation parameters %s""" , snake_case_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowerCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_lowerCAmelCase = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""")
for p in extra_model_params:
if getattr(snake_case_ , snake_case_ , snake_case_ ):
assert hasattr(snake_case_ , snake_case_ ), F"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
setattr(snake_case_ , snake_case_ , getattr(snake_case_ , snake_case_ ) )
_lowerCAmelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf=""".ckpt""" in model_args.model_name_or_path , config=snake_case_ , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(snake_case_ , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
_lowerCAmelCase = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(snake_case_ , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(snake_case_ , snake_case_ ):
_lowerCAmelCase = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
_lowerCAmelCase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(snake_case_ )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
_lowerCAmelCase = SeqaSeqDataset
# Get datasets
_lowerCAmelCase = (
dataset_class(
snake_case_ , type_path="""train""" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_train
else None
)
_lowerCAmelCase = (
dataset_class(
snake_case_ , type_path="""val""" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
_lowerCAmelCase = (
dataset_class(
snake_case_ , type_path="""test""" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
_lowerCAmelCase = (
build_compute_metrics_fn(data_args.task , snake_case_ ) if training_args.predict_with_generate else None
)
_lowerCAmelCase = SeqaSeqTrainer(
model=snake_case_ , args=snake_case_ , data_args=snake_case_ , train_dataset=snake_case_ , eval_dataset=snake_case_ , data_collator=SeqaSeqDataCollator(
snake_case_ , snake_case_ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=snake_case_ , tokenizer=snake_case_ , )
_lowerCAmelCase = {}
# Training
if training_args.do_train:
logger.info("""*** Train ***""" )
_lowerCAmelCase = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
_lowerCAmelCase = train_result.metrics
_lowerCAmelCase = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("""train""" , snake_case_ , training_args.output_dir )
all_metrics.update(snake_case_ )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
_lowerCAmelCase = trainer.evaluate(metric_key_prefix="""val""" )
_lowerCAmelCase = data_args.n_val
_lowerCAmelCase = round(metrics["""val_loss"""] , 4 )
if trainer.is_world_process_zero():
handle_metrics("""val""" , snake_case_ , training_args.output_dir )
all_metrics.update(snake_case_ )
if training_args.do_predict:
logger.info("""*** Predict ***""" )
_lowerCAmelCase = trainer.predict(test_dataset=snake_case_ , metric_key_prefix="""test""" )
_lowerCAmelCase = test_output.metrics
_lowerCAmelCase = data_args.n_test
if trainer.is_world_process_zero():
_lowerCAmelCase = round(metrics["""test_loss"""] , 4 )
handle_metrics("""test""" , snake_case_ , training_args.output_dir )
all_metrics.update(snake_case_ )
if training_args.predict_with_generate:
_lowerCAmelCase = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=snake_case_ , clean_up_tokenization_spaces=snake_case_ )
_lowerCAmelCase = lmap(str.strip , snake_case_ )
write_txt_file(snake_case_ , os.path.join(training_args.output_dir , """test_generations.txt""" ) )
if trainer.is_world_process_zero():
save_json(snake_case_ , os.path.join(training_args.output_dir , """all_results.json""" ) )
return all_metrics
def __UpperCAmelCase ( snake_case_ : Any ) -> Dict:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
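# Hedged invocation sketch (the flag names come from the dataclasses above
# and Seq2SeqTrainingArguments; the script name, checkpoint and paths are
# placeholders):
#
#   python finetune_trainer.py \
#     --model_name_or_path sshleifer/distilbart-xsum-12-3 \
#     --data_dir ./xsum --output_dir ./output \
#     --do_train --do_eval --predict_with_generate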
| 317
| 1
|
"""simple docstring"""
import operator as op
SCREAMING_SNAKE_CASE : Union[str, Any] = '''scaler.pt'''
SCREAMING_SNAKE_CASE : Optional[Any] = '''pytorch_model'''
SCREAMING_SNAKE_CASE : str = '''random_states'''
SCREAMING_SNAKE_CASE : int = '''optimizer'''
SCREAMING_SNAKE_CASE : Optional[int] = '''scheduler'''
SCREAMING_SNAKE_CASE : int = '''pytorch_model.bin'''
SCREAMING_SNAKE_CASE : List[Any] = '''pytorch_model.bin.index.json'''
SCREAMING_SNAKE_CASE : str = '''model.safetensors'''
SCREAMING_SNAKE_CASE : str = '''model.safetensors.index.json'''
SCREAMING_SNAKE_CASE : Dict = '''1.10.2'''
SCREAMING_SNAKE_CASE : Optional[Any] = '''py38'''
SCREAMING_SNAKE_CASE : List[str] = '''4.17.0'''
SCREAMING_SNAKE_CASE : Dict = ['''ml.p3.16xlarge''', '''ml.p3dn.24xlarge''', '''ml.p4dn.24xlarge''']
SCREAMING_SNAKE_CASE : Dict = ['''FULL_SHARD''', '''SHARD_GRAD_OP''', '''NO_SHARD''', '''HYBRID_SHARD''', '''HYBRID_SHARD_ZERO2''']
SCREAMING_SNAKE_CASE : List[Any] = ['''TRANSFORMER_BASED_WRAP''', '''SIZE_BASED_WRAP''', '''NO_WRAP''']
SCREAMING_SNAKE_CASE : List[Any] = ['''BACKWARD_PRE''', '''BACKWARD_POST''', '''NO_PREFETCH''']
SCREAMING_SNAKE_CASE : Any = ['''FULL_STATE_DICT''', '''LOCAL_STATE_DICT''', '''SHARDED_STATE_DICT''']
SCREAMING_SNAKE_CASE : Any = '''2.0.1'''
SCREAMING_SNAKE_CASE : Tuple = ['''pdsh''', '''standard''', '''openmpi''', '''mvapich''']
SCREAMING_SNAKE_CASE : Tuple = ['''default''', '''reduce-overhead''', '''max-autotune''']
SCREAMING_SNAKE_CASE : int = {'''>''': op.gt, '''>=''': op.ge, '''==''': op.eq, '''!=''': op.ne, '''<=''': op.le, '''<''': op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
SCREAMING_SNAKE_CASE : Union[str, Any] = [
'''nnodes''',
'''nproc_per_node''',
'''rdzv_backend''',
'''rdzv_endpoint''',
'''rdzv_id''',
'''rdzv_conf''',
'''standalone''',
'''max_restarts''',
'''monitor_interval''',
'''start_method''',
'''role''',
'''module''',
'''m''',
'''no_python''',
'''run_path''',
'''log_dir''',
'''r''',
'''redirects''',
'''t''',
'''tee''',
'''node_rank''',
'''master_addr''',
'''master_port''',
]
SCREAMING_SNAKE_CASE : List[str] = ['''DEEPSPEED''', '''MULTI_GPU''', '''FSDP''', '''MEGATRON_LM''']
SCREAMING_SNAKE_CASE : Dict = ['''DEEPSPEED''', '''MULTI_XPU''', '''FSDP''']
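# Hedged sketch of how the operator map above is typically consumed (upstream
# it is named STR_OPERATION_TO_FUNC; the helper here is illustrative, not
# part of this constants module):
#
#   def compare_versions(v1: str, operation: str, v2: str) -> bool:
#       from packaging.version import parse
#       return STR_OPERATION_TO_FUNC[operation](parse(v1), parse(v2))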
| 317
|
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE : List[Any] = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 317
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE : int = {
'''configuration_nllb_moe''': [
'''NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''NllbMoeConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : List[str] = [
'''NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NllbMoeForConditionalGeneration''',
'''NllbMoeModel''',
'''NllbMoePreTrainedModel''',
'''NllbMoeTop2Router''',
'''NllbMoeSparseMLP''',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTopaRouter,
)
else:
import sys
SCREAMING_SNAKE_CASE : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 317
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __lowerCamelCase ( unittest.TestCase ):
def __init__(self , lowerCamelCase , lowerCamelCase=7 , lowerCamelCase=3 , lowerCamelCase=18 , lowerCamelCase=30 , lowerCamelCase=400 , lowerCamelCase=True , lowerCamelCase=None , lowerCamelCase=True , lowerCamelCase=None , ):
'''simple docstring'''
_lowerCAmelCase = size if size is not None else {"""shortest_edge""": 20}
_lowerCAmelCase = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = num_channels
_lowerCAmelCase = image_size
_lowerCAmelCase = min_resolution
_lowerCAmelCase = max_resolution
_lowerCAmelCase = do_resize
_lowerCAmelCase = size
_lowerCAmelCase = do_center_crop
_lowerCAmelCase = crop_size
def A__ (self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __lowerCamelCase ( __lowercase , unittest.TestCase ):
__UpperCamelCase = MobileNetVaImageProcessor if is_vision_available() else None
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = MobileNetVaImageProcessingTester(self )
@property
def A__ (self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase , """size""" ) )
self.assertTrue(hasattr(lowerCamelCase , """do_center_crop""" ) )
self.assertTrue(hasattr(lowerCamelCase , """crop_size""" ) )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 20} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
_lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def A__ (self ):
'''simple docstring'''
pass
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , Image.Image )
# Test not batched input
_lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_lowerCAmelCase = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , np.ndarray )
# Test not batched input
_lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_lowerCAmelCase = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , torch.Tensor )
# Test not batched input
_lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_lowerCAmelCase = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
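# Hedged usage sketch mirroring what the tests above assert (upstream the
# class is MobileNetV1ImageProcessor or its V2 sibling; the configuration
# mirrors the tester defaults):
#
#   proc = MobileNetV1ImageProcessor(size={"shortest_edge": 20},
#                                    crop_size={"height": 18, "width": 18})
#   pixel_values = proc(images, return_tensors="pt").pixel_values
#   # -> shape (batch_size, num_channels, 18, 18)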
| 317
| 1
|
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = ['image_processor', 'tokenizer']
__UpperCamelCase = 'ViltImageProcessor'
__UpperCamelCase = ('BertTokenizer', 'BertTokenizerFast')
def __init__(self , lowerCamelCase=None , lowerCamelCase=None , **lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , lowerCamelCase , )
_lowerCAmelCase = kwargs.pop("""feature_extractor""" )
_lowerCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(lowerCamelCase , lowerCamelCase )
_lowerCAmelCase = self.image_processor
def __call__(self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = True , lowerCamelCase = False , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = 0 , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = False , lowerCamelCase = False , lowerCamelCase = False , lowerCamelCase = False , lowerCamelCase = True , lowerCamelCase = None , **lowerCamelCase , ):
'''simple docstring'''
_lowerCAmelCase = self.tokenizer(
text=lowerCamelCase , add_special_tokens=lowerCamelCase , padding=lowerCamelCase , truncation=lowerCamelCase , max_length=lowerCamelCase , stride=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_token_type_ids=lowerCamelCase , return_attention_mask=lowerCamelCase , return_overflowing_tokens=lowerCamelCase , return_special_tokens_mask=lowerCamelCase , return_offsets_mapping=lowerCamelCase , return_length=lowerCamelCase , verbose=lowerCamelCase , return_tensors=lowerCamelCase , **lowerCamelCase , )
# add pixel_values + pixel_mask
_lowerCAmelCase = self.image_processor(lowerCamelCase , return_tensors=lowerCamelCase )
encoding.update(lowerCamelCase )
return encoding
def A__ (self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase , **lowerCamelCase )
def A__ (self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase , **lowerCamelCase )
@property
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.tokenizer.model_input_names
_lowerCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def A__ (self ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , lowerCamelCase , )
return self.image_processor_class
@property
def A__ (self ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , lowerCamelCase , )
return self.image_processor
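# Hedged usage sketch (upstream this is ViltProcessor; the checkpoint id is
# an assumption):
#
#   processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
#   enc = processor(image, "How many cats are there?", return_tensors="pt")
#   # enc combines the tokenizer outputs (input_ids, attention_mask, ...)
#   # with pixel_values (+ pixel_mask) from the image processor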
| 317
|
"""simple docstring"""
def __UpperCAmelCase ( snake_case_ : list ) -> list:
"""simple docstring"""
for i in range(len(snake_case_ ) - 1 , 0 , -1 ):
_lowerCAmelCase = False
for j in range(snake_case_ , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
_lowerCAmelCase , _lowerCAmelCase = unsorted[j - 1], unsorted[j]
_lowerCAmelCase = True
for j in range(snake_case_ ):
if unsorted[j] > unsorted[j + 1]:
_lowerCAmelCase , _lowerCAmelCase = unsorted[j + 1], unsorted[j]
_lowerCAmelCase = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE : List[Any] = input('''Enter numbers separated by a comma:\n''').strip()
SCREAMING_SNAKE_CASE : List[str] = [int(item) for item in user_input.split(''',''')]
print(F'{cocktail_shaker_sort(unsorted) = }')
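# Hedged example (``cocktail_shaker_sort`` per the call above):
#
#   >>> cocktail_shaker_sort([4, 5, 2, 1, 2])
#   [1, 2, 2, 4, 5]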
| 317
| 1
|
"""simple docstring"""
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE : List[str] = get_tests_dir('''fixtures/test_sentencepiece_no_bos.model''')
@require_sentencepiece
@require_tokenizers
class __lowerCamelCase ( __lowercase , unittest.TestCase ):
__UpperCamelCase = PegasusTokenizer
__UpperCamelCase = PegasusTokenizerFast
__UpperCamelCase = True
__UpperCamelCase = True
def A__ (self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCAmelCase = PegasusTokenizer(lowerCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def A__ (self ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
def A__ (self , **lowerCamelCase ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
return ("This is a test", "This is a test")
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = """</s>"""
_lowerCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase ) , lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase ) , lowerCamelCase )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """</s>""" )
self.assertEqual(vocab_keys[-1] , """v""" )
self.assertEqual(len(lowerCamelCase ) , 1_103 )
def A__ (self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_103 )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_lowerCAmelCase = self.tokenizer_class.from_pretrained(self.tmpdirname )
_lowerCAmelCase = (
"""Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
""" </s> <pad> <pad> <pad>"""
)
_lowerCAmelCase = rust_tokenizer([raw_input_str] , return_tensors=lowerCamelCase , add_special_tokens=lowerCamelCase ).input_ids[0]
_lowerCAmelCase = py_tokenizer([raw_input_str] , return_tensors=lowerCamelCase , add_special_tokens=lowerCamelCase ).input_ids[0]
self.assertListEqual(lowerCamelCase , lowerCamelCase )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
_lowerCAmelCase = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
_lowerCAmelCase = [2, 413, 615, 114, 3, 1_971, 113, 1_679, 10_710, 107, 1]
_lowerCAmelCase = tokenizer([raw_input_str] , return_tensors=lowerCamelCase ).input_ids[0]
self.assertListEqual(lowerCamelCase , lowerCamelCase )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 96_103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1_024
_lowerCAmelCase = """To ensure a smooth flow of bank resolutions."""
_lowerCAmelCase = [413, 615, 114, 2_291, 1_971, 113, 1_679, 10_710, 107, 1]
_lowerCAmelCase = tokenizer([raw_input_str] , return_tensors=lowerCamelCase ).input_ids[0]
self.assertListEqual(lowerCamelCase , lowerCamelCase )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = ["""This is going to be way too long.""" * 150, """short example"""]
_lowerCAmelCase = ["""not super long but more than 5 tokens""", """tiny"""]
_lowerCAmelCase = self._large_tokenizer(lowerCamelCase , padding=lowerCamelCase , truncation=lowerCamelCase , return_tensors="""pt""" )
_lowerCAmelCase = self._large_tokenizer(
text_target=lowerCamelCase , max_length=5 , padding=lowerCamelCase , truncation=lowerCamelCase , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 1_024)
assert batch.attention_mask.shape == (2, 1_024)
assert targets["input_ids"].shape == (2, 5)
assert len(lowerCamelCase ) == 2 # input_ids, attention_mask.
@slow
def A__ (self ):
'''simple docstring'''
# fmt: off
_lowerCAmelCase = {"""input_ids""": [[38_979, 143, 18_485, 606, 130, 26_669, 87_686, 121, 54_189, 1_129, 111, 26_669, 87_686, 121, 9_114, 14_787, 121, 13_249, 158, 592, 956, 121, 14_621, 31_576, 143, 62_613, 108, 9_688, 930, 43_430, 11_562, 62_613, 304, 108, 11_443, 897, 108, 9_314, 17_415, 63_399, 108, 11_443, 7_614, 18_316, 118, 4_284, 7_148, 12_430, 143, 1_400, 25_703, 158, 111, 4_284, 7_148, 11_772, 143, 21_297, 1_064, 158, 122, 204, 3_506, 1_754, 1_133, 14_787, 1_581, 115, 33_224, 4_482, 111, 1_355, 110, 29_173, 317, 50_833, 108, 20_147, 94_665, 111, 77_198, 107, 1], [110, 62_613, 117, 638, 112, 1_133, 121, 20_098, 1_355, 79_050, 13_872, 135, 1_596, 53_541, 1_352, 141, 13_039, 5_542, 124, 302, 518, 111, 268, 2_956, 115, 149, 4_427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1_235, 2_799, 18_289, 17_780, 204, 109, 9_474, 1_296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class __lowerCamelCase ( __lowercase , unittest.TestCase ):
__UpperCamelCase = PegasusTokenizer
__UpperCamelCase = PegasusTokenizerFast
__UpperCamelCase = True
__UpperCamelCase = True
def A__ (self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCAmelCase = PegasusTokenizer(lowerCamelCase , offset=0 , mask_token_sent=lowerCamelCase , mask_token="""[MASK]""" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def A__ (self ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
def A__ (self , **lowerCamelCase ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
return ("This is a test", "This is a test")
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_lowerCAmelCase = self.tokenizer_class.from_pretrained(self.tmpdirname )
_lowerCAmelCase = (
"""Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
""" <pad> <pad> <pad>"""
)
_lowerCAmelCase = rust_tokenizer([raw_input_str] , return_tensors=lowerCamelCase , add_special_tokens=lowerCamelCase ).input_ids[0]
_lowerCAmelCase = py_tokenizer([raw_input_str] , return_tensors=lowerCamelCase , add_special_tokens=lowerCamelCase ).input_ids[0]
self.assertListEqual(lowerCamelCase , lowerCamelCase )
@require_torch
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = ["""This is going to be way too long.""" * 1_000, """short example"""]
_lowerCAmelCase = ["""not super long but more than 5 tokens""", """tiny"""]
_lowerCAmelCase = self._large_tokenizer(lowerCamelCase , padding=lowerCamelCase , truncation=lowerCamelCase , return_tensors="""pt""" )
_lowerCAmelCase = self._large_tokenizer(
text_target=lowerCamelCase , max_length=5 , padding=lowerCamelCase , truncation=lowerCamelCase , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 4_096)
assert batch.attention_mask.shape == (2, 4_096)
assert targets["input_ids"].shape == (2, 5)
assert len(lowerCamelCase ) == 2 # input_ids, attention_mask.
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = (
"""This is an example string that is used to test the original TF implementation against the HF"""
""" implementation"""
)
_lowerCAmelCase = self._large_tokenizer(lowerCamelCase ).input_ids
self.assertListEqual(
lowerCamelCase , [182, 117, 142, 587, 4_211, 120, 117, 263, 112, 804, 109, 856, 25_016, 3_137, 464, 109, 26_955, 3_137, 1] , )
| 317
|
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
def __UpperCAmelCase ( snake_case_ : bool , snake_case_ : bool ) -> Tuple:
"""simple docstring"""
def run_func(snake_case_ : Union[str, Any] ):
@wraps(snake_case_ )
def run_in_eager_mode(*snake_case_ : Optional[int] , **snake_case_ : Union[str, Any] ):
return func(*snake_case_ , **snake_case_ )
@wraps(snake_case_ )
@tf.function(experimental_compile=snake_case_ )
def run_in_graph_mode(*snake_case_ : Dict , **snake_case_ : Union[str, Any] ):
return func(*snake_case_ , **snake_case_ )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
"""Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
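# Hedged usage sketch of the factory above (illustrative only): with
# `do_eager_mode=False` the returned wrapper compiles its target via
# `tf.function(experimental_compile=use_xla)`. `toy_matmul` is hypothetical.
if is_tf_available():

    @run_with_tf_optimizations(False, False)  # graph mode, no XLA
    def toy_matmul(x):
        return tf.matmul(x, x)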
def __UpperCAmelCase ( snake_case_ : int , snake_case_ : int , snake_case_ : int ) -> ["tf.Tensor"]:
"""simple docstring"""
_lowerCAmelCase = random.Random()
_lowerCAmelCase = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(snake_case_ , shape=(batch_size, sequence_length) , dtype=tf.intaa )
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = "TensorFlow"
@property
def A__ (self ):
'''simple docstring'''
return tf.__version__
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_lowerCAmelCase = self._prepare_inference_func(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return self._measure_speed(_inference )
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_lowerCAmelCase = self._prepare_train_func(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return self._measure_speed(_train )
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowerCamelCase )
_lowerCAmelCase = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_lowerCAmelCase = self._prepare_inference_func(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return self._measure_memory(_inference )
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowerCamelCase )
_lowerCAmelCase = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_lowerCAmelCase = self._prepare_train_func(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return self._measure_memory(_train )
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
_lowerCAmelCase = (
hasattr(lowerCamelCase , """architectures""" )
and isinstance(config.architectures , lowerCamelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_lowerCAmelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
_lowerCAmelCase = __import__("""transformers""" , fromlist=[model_class] )
_lowerCAmelCase = getattr(lowerCamelCase , lowerCamelCase )
_lowerCAmelCase = model_cls(lowerCamelCase )
except ImportError:
raise ImportError(
f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
_lowerCAmelCase = TF_MODEL_MAPPING[config.__class__](lowerCamelCase )
# encoder-decoder has vocab size saved differently
_lowerCAmelCase = config.vocab_size if hasattr(lowerCamelCase , """vocab_size""" ) else config.encoder.vocab_size
_lowerCAmelCase = random_input_ids(lowerCamelCase , lowerCamelCase , lowerCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(lowerCamelCase , decoder_input_ids=lowerCamelCase , training=lowerCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(lowerCamelCase , training=lowerCamelCase )
_lowerCAmelCase = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" )
if self.args.fpaa:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
_lowerCAmelCase = (
hasattr(lowerCamelCase , """architectures""" )
and isinstance(config.architectures , lowerCamelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_lowerCAmelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
_lowerCAmelCase = __import__("""transformers""" , fromlist=[model_class] )
_lowerCAmelCase = getattr(lowerCamelCase , lowerCamelCase )
_lowerCAmelCase = model_cls(lowerCamelCase )
except ImportError:
raise ImportError(
f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
_lowerCAmelCase = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](lowerCamelCase )
# encoder-decoder has vocab size saved differently
_lowerCAmelCase = config.vocab_size if hasattr(lowerCamelCase , """vocab_size""" ) else config.encoder.vocab_size
_lowerCAmelCase = random_input_ids(lowerCamelCase , lowerCamelCase , lowerCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
_lowerCAmelCase = model(lowerCamelCase , decoder_input_ids=lowerCamelCase , labels=lowerCamelCase , training=lowerCamelCase )[0]
_lowerCAmelCase = tf.gradients(lowerCamelCase , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
_lowerCAmelCase = model(lowerCamelCase , labels=lowerCamelCase , training=lowerCamelCase )[0]
_lowerCAmelCase = tf.gradients(lowerCamelCase , model.trainable_variables )
return gradients
_lowerCAmelCase = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def A__ (self , lowerCamelCase ):
'''simple docstring'''
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run the model an additional 5 times to stabilize compilation for tpu
logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" )
timeit.repeat(lowerCamelCase , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
_lowerCAmelCase = timeit.repeat(
lowerCamelCase , repeat=self.args.repeat , number=10 , )
return min(lowerCamelCase ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(f"""Doesn't fit on GPU. {e}""" )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
logger.info(
"""Note that TensorFlow allocates more memory than """
"""it might need to speed up computation. """
"""The memory reported here corresponds to the memory """
"""reported by `nvidia-smi`, which can vary depending """
"""on total available memory on the GPU that is used.""" )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"""`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"""
""" consumption line by line.""" )
_lowerCAmelCase = start_memory_tracing("""transformers""" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"""Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"""
""" with `args.memory=False`""" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"""py3nvml not installed, we won't log GPU memory usage. """
"""Install py3nvml (pip install py3nvml) to log information about GPU.""" )
_lowerCAmelCase = """N/A"""
else:
logger.info(
"""Measuring total GPU usage on GPU device. Make sure to not have additional processes"""
""" running on the same GPU.""" )
# init nvml
nvml.nvmlInit()
func()
_lowerCAmelCase = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
_lowerCAmelCase = nvml.nvmlDeviceGetMemoryInfo(lowerCamelCase )
_lowerCAmelCase = meminfo.used
_lowerCAmelCase = Memory(lowerCamelCase )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"""When enabling line by line tracing, the max peak memory for CPU is inaccurate in"""
""" TensorFlow.""" )
_lowerCAmelCase = None
else:
_lowerCAmelCase = measure_peak_memory_cpu(lowerCamelCase )
_lowerCAmelCase = Memory(lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else memory_bytes
if self.args.trace_memory_line_by_line:
_lowerCAmelCase = stop_memory_tracing(lowerCamelCase )
if memory is None:
_lowerCAmelCase = summary.total
else:
_lowerCAmelCase = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(f"""Doesn't fit on GPU. {e}""" )
return "N/A", None
| 317
| 1
|
"""simple docstring"""
def __UpperCAmelCase ( snake_case_ : list ) -> list:
"""simple docstring"""
_lowerCAmelCase = len(snake_case_ )
for i in range(1 , n ):
_lowerCAmelCase = collection[i]
_lowerCAmelCase = 0
_lowerCAmelCase = i - 1
while low <= high:
_lowerCAmelCase = (low + high) // 2
if val < collection[mid]:
_lowerCAmelCase = mid - 1
else:
_lowerCAmelCase = mid + 1
for j in range(i , low , -1 ):
_lowerCAmelCase = collection[j - 1]
_lowerCAmelCase = val
return collection
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Union[str, Any] = input('''Enter numbers separated by a comma:\n''').strip()
SCREAMING_SNAKE_CASE : Tuple = [int(item) for item in user_input.split(''',''')]
print(binary_insertion_sort(unsorted))
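# Hedged note: the binary search above computes the same insertion point as the
# standard-library `bisect` module; a minimal equivalent sketch:
import bisect

def binary_insertion_sort_with_bisect(collection: list) -> list:
    result: list = []
    for item in collection:
        bisect.insort_right(result, item)  # binary search, then insert
    return result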
| 317
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Any = {
'''transfo-xl-wt103''': '''https://huggingface.co/transfo-xl-wt103/resolve/main/config.json''',
}
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = 'transfo-xl'
__UpperCamelCase = ['mems']
__UpperCamelCase = {
'n_token': 'vocab_size',
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__(self , lowerCamelCase=267_735 , lowerCamelCase=[20_000, 40_000, 200_000] , lowerCamelCase=1_024 , lowerCamelCase=1_024 , lowerCamelCase=16 , lowerCamelCase=64 , lowerCamelCase=4_096 , lowerCamelCase=4 , lowerCamelCase=False , lowerCamelCase=18 , lowerCamelCase=1_600 , lowerCamelCase=1_000 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=0 , lowerCamelCase=-1 , lowerCamelCase=True , lowerCamelCase=0.1 , lowerCamelCase=0.0 , lowerCamelCase=True , lowerCamelCase="normal" , lowerCamelCase=0.01 , lowerCamelCase=0.01 , lowerCamelCase=0.02 , lowerCamelCase=1e-5 , lowerCamelCase=0 , **lowerCamelCase , ):
'''simple docstring'''
_lowerCAmelCase = vocab_size
_lowerCAmelCase = []
self.cutoffs.extend(lowerCamelCase )
if proj_share_all_but_first:
_lowerCAmelCase = [False] + [True] * len(self.cutoffs )
else:
_lowerCAmelCase = [False] + [False] * len(self.cutoffs )
_lowerCAmelCase = d_model
_lowerCAmelCase = d_embed
_lowerCAmelCase = d_head
_lowerCAmelCase = d_inner
_lowerCAmelCase = div_val
_lowerCAmelCase = pre_lnorm
_lowerCAmelCase = n_layer
_lowerCAmelCase = n_head
_lowerCAmelCase = mem_len
_lowerCAmelCase = same_length
_lowerCAmelCase = attn_type
_lowerCAmelCase = clamp_len
_lowerCAmelCase = sample_softmax
_lowerCAmelCase = adaptive
_lowerCAmelCase = dropout
_lowerCAmelCase = dropatt
_lowerCAmelCase = untie_r
_lowerCAmelCase = init
_lowerCAmelCase = init_range
_lowerCAmelCase = proj_init_std
_lowerCAmelCase = init_std
_lowerCAmelCase = layer_norm_epsilon
super().__init__(eos_token_id=lowerCamelCase , **lowerCamelCase )
@property
def A__ (self ):
'''simple docstring'''
logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
def A__ (self , lowerCamelCase ):
'''simple docstring'''
raise NotImplementedError(
f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 317
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
SCREAMING_SNAKE_CASE : Any = None
SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Any = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
SCREAMING_SNAKE_CASE : Any = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'''
),
},
}
SCREAMING_SNAKE_CASE : Tuple = {
'''facebook/nllb-large-en-ro''': 1_0_2_4,
'''facebook/nllb-200-distilled-600M''': 1_0_2_4,
}
# fmt: off
SCREAMING_SNAKE_CASE : Optional[Any] = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = ['input_ids', 'attention_mask']
__UpperCamelCase = NllbTokenizer
__UpperCamelCase = []
__UpperCamelCase = []
def __init__(self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="<s>" , lowerCamelCase="</s>" , lowerCamelCase="</s>" , lowerCamelCase="<s>" , lowerCamelCase="<unk>" , lowerCamelCase="<pad>" , lowerCamelCase="<mask>" , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=False , **lowerCamelCase , ):
'''simple docstring'''
_lowerCAmelCase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else mask_token
_lowerCAmelCase = legacy_behaviour
super().__init__(
vocab_file=lowerCamelCase , tokenizer_file=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , sep_token=lowerCamelCase , cls_token=lowerCamelCase , unk_token=lowerCamelCase , pad_token=lowerCamelCase , mask_token=lowerCamelCase , src_lang=lowerCamelCase , tgt_lang=lowerCamelCase , additional_special_tokens=lowerCamelCase , legacy_behaviour=lowerCamelCase , **lowerCamelCase , )
_lowerCAmelCase = vocab_file
_lowerCAmelCase = False if not self.vocab_file else True
_lowerCAmelCase = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
_lowerCAmelCase = {
lang_code: self.convert_tokens_to_ids(lowerCamelCase ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_lowerCAmelCase = src_lang if src_lang is not None else """eng_Latn"""
_lowerCAmelCase = self.convert_tokens_to_ids(self._src_lang )
_lowerCAmelCase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def A__ (self ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def A__ (self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def A__ (self , lowerCamelCase , lowerCamelCase = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def A__ (self , lowerCamelCase , lowerCamelCase = None ):
'''simple docstring'''
_lowerCAmelCase = [self.sep_token_id]
_lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
_lowerCAmelCase = src_lang
_lowerCAmelCase = self(lowerCamelCase , add_special_tokens=lowerCamelCase , return_tensors=lowerCamelCase , **lowerCamelCase )
_lowerCAmelCase = self.convert_tokens_to_ids(lowerCamelCase )
_lowerCAmelCase = tgt_lang_id
return inputs
def A__ (self , lowerCamelCase , lowerCamelCase = "eng_Latn" , lowerCamelCase = None , lowerCamelCase = "fra_Latn" , **lowerCamelCase , ):
'''simple docstring'''
_lowerCAmelCase = src_lang
_lowerCAmelCase = tgt_lang
return super().prepare_seqaseq_batch(lowerCamelCase , lowerCamelCase , **lowerCamelCase )
def A__ (self ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def A__ (self ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.convert_tokens_to_ids(lowerCamelCase )
if self.legacy_behaviour:
_lowerCAmelCase = []
_lowerCAmelCase = [self.eos_token_id, self.cur_lang_code]
else:
_lowerCAmelCase = [self.cur_lang_code]
_lowerCAmelCase = [self.eos_token_id]
_lowerCAmelCase = self.convert_ids_to_tokens(self.prefix_tokens )
_lowerCAmelCase = self.convert_ids_to_tokens(self.suffix_tokens )
_lowerCAmelCase = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.convert_tokens_to_ids(lowerCamelCase )
if self.legacy_behaviour:
_lowerCAmelCase = []
_lowerCAmelCase = [self.eos_token_id, self.cur_lang_code]
else:
_lowerCAmelCase = [self.cur_lang_code]
_lowerCAmelCase = [self.eos_token_id]
_lowerCAmelCase = self.convert_ids_to_tokens(self.prefix_tokens )
_lowerCAmelCase = self.convert_ids_to_tokens(self.suffix_tokens )
_lowerCAmelCase = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def A__ (self , lowerCamelCase , lowerCamelCase = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(lowerCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
return
_lowerCAmelCase = os.path.join(
lowerCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ):
copyfile(self.vocab_file , lowerCamelCase )
return (out_vocab_file,)
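# Hedged sketch of the two special-token layouts produced by the
# set_src/tgt_lang methods above (helper name and ids are illustrative only):
def nllb_affix_layout(lang_code_id: int, eos_id: int, legacy: bool) -> tuple:
    if legacy:
        # legacy: "src_text </s> <lang_code>"
        return [], [eos_id, lang_code_id]
    # current: "<lang_code> src_text </s>"
    return [lang_code_id], [eos_id]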
| 317
|
"""simple docstring"""
import math
def __UpperCAmelCase ( snake_case_ : int ) -> list[int]:
"""simple docstring"""
_lowerCAmelCase = []
_lowerCAmelCase = 2
_lowerCAmelCase = int(math.sqrt(snake_case_ ) ) # Size of every segment
_lowerCAmelCase = [True] * (end + 1)
_lowerCAmelCase = []
while start <= end:
if temp[start] is True:
in_prime.append(snake_case_ )
for i in range(start * start , end + 1 , start ):
_lowerCAmelCase = False
start += 1
prime += in_prime
_lowerCAmelCase = end + 1
_lowerCAmelCase = min(2 * end , snake_case_ )
while low <= n:
_lowerCAmelCase = [True] * (high - low + 1)
for each in in_prime:
_lowerCAmelCase = math.floor(low / each ) * each
if t < low:
t += each
for j in range(t , high + 1 , each ):
_lowerCAmelCase = False
for j in range(len(snake_case_ ) ):
if temp[j] is True:
prime.append(j + low )
_lowerCAmelCase = high + 1
_lowerCAmelCase = min(high + end , snake_case_ )
return prime
print(sieve(1_0**6))
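# Hedged cross-check: a plain Sieve of Eratosthenes that a correct segmented
# sieve such as the one above should agree with on small inputs.
def simple_sieve(n: int) -> list:
    flags = [True] * (n + 1)
    flags[0] = flags[1] = False
    for p in range(2, int(n**0.5) + 1):
        if flags[p]:
            for multiple in range(p * p, n + 1, p):
                flags[multiple] = False
    return [i for i, is_p in enumerate(flags) if is_p]

# simple_sieve(30) -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]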
| 317
| 1
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __lowerCamelCase ( __lowercase , unittest.TestCase ):
__UpperCamelCase = KandinskyVaaPipeline
__UpperCamelCase = [
'image_embeds',
'negative_image_embeds',
]
__UpperCamelCase = ['image_embeds', 'negative_image_embeds']
__UpperCamelCase = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
__UpperCamelCase = False
@property
def A__ (self ):
'''simple docstring'''
return 32
@property
def A__ (self ):
'''simple docstring'''
return 32
@property
def A__ (self ):
'''simple docstring'''
return self.time_input_dim
@property
def A__ (self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def A__ (self ):
'''simple docstring'''
return 100
@property
def A__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase = {
"""in_channels""": 4,
# Out channels is double the in channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
_lowerCAmelCase = UNetaDConditionModel(**lowerCamelCase )
return model
@property
def A__ (self ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def A__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase = VQModel(**self.dummy_movq_kwargs )
return model
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.dummy_unet
_lowerCAmelCase = self.dummy_movq
_lowerCAmelCase = DDIMScheduler(
num_train_timesteps=1_000 , beta_schedule="""linear""" , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=lowerCamelCase , set_alpha_to_one=lowerCamelCase , steps_offset=1 , prediction_type="""epsilon""" , thresholding=lowerCamelCase , )
_lowerCAmelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def A__ (self , lowerCamelCase , lowerCamelCase=0 ):
'''simple docstring'''
_lowerCAmelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
_lowerCAmelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
lowerCamelCase )
if str(lowerCamelCase ).startswith("""mps""" ):
_lowerCAmelCase = torch.manual_seed(lowerCamelCase )
else:
_lowerCAmelCase = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
_lowerCAmelCase = {
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = """cpu"""
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = self.pipeline_class(**lowerCamelCase )
_lowerCAmelCase = pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
_lowerCAmelCase = pipe(**self.get_dummy_inputs(lowerCamelCase ) )
_lowerCAmelCase = output.images
_lowerCAmelCase = pipe(
**self.get_dummy_inputs(lowerCamelCase ) , return_dict=lowerCamelCase , )[0]
_lowerCAmelCase = image[0, -3:, -3:, -1]
_lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_lowerCAmelCase = np.array(
[0.623_7976, 1.0, 0.3644_1332, 1.0, 0.7063_9634, 0.2987_7186, 0.8565_2125, 0.521_6843, 0.5445_4046] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
def A__ (self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy""" )
_lowerCAmelCase = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(lowerCamelCase )
_lowerCAmelCase = KandinskyVaaPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-decoder""" , torch_dtype=torch.floataa )
_lowerCAmelCase = pipeline.to(lowerCamelCase )
pipeline.set_progress_bar_config(disable=lowerCamelCase )
_lowerCAmelCase = """red cat, 4k photo"""
_lowerCAmelCase = torch.Generator(device="""cuda""" ).manual_seed(0 )
_lowerCAmelCase , _lowerCAmelCase = pipe_prior(
lowerCamelCase , generator=lowerCamelCase , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
_lowerCAmelCase = torch.Generator(device="""cuda""" ).manual_seed(0 )
_lowerCAmelCase = pipeline(
image_embeds=lowerCamelCase , negative_image_embeds=lowerCamelCase , generator=lowerCamelCase , num_inference_steps=100 , output_type="""np""" , )
_lowerCAmelCase = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase )
| 317
|
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
SCREAMING_SNAKE_CASE : Any = (7_2_0, 1_2_8_0) # Height, Width
SCREAMING_SNAKE_CASE : List[str] = (0.4, 0.6) # if height or width is lower than this scale, drop it.
SCREAMING_SNAKE_CASE : List[Any] = 1 / 1_0_0
SCREAMING_SNAKE_CASE : Optional[Any] = ''''''
SCREAMING_SNAKE_CASE : Dict = ''''''
SCREAMING_SNAKE_CASE : List[Any] = ''''''
SCREAMING_SNAKE_CASE : Dict = 2_5_0
def __UpperCAmelCase ( ) -> None:
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = get_dataset(snake_case_ , snake_case_ )
for index in range(snake_case_ ):
_lowerCAmelCase = random.sample(range(len(snake_case_ ) ) , 4 )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = update_image_and_anno(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , filter_scale=snake_case_ , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
_lowerCAmelCase = random_chars(32 )
_lowerCAmelCase = path.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
_lowerCAmelCase = F"""{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"""
cva.imwrite(F"""{file_root}.jpg""" , snake_case_ , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F"""Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}""" )
_lowerCAmelCase = []
for anno in new_annos:
_lowerCAmelCase = anno[3] - anno[1]
_lowerCAmelCase = anno[4] - anno[2]
_lowerCAmelCase = anno[1] + width / 2
_lowerCAmelCase = anno[2] + height / 2
_lowerCAmelCase = F"""{anno[0]} {x_center} {y_center} {width} {height}"""
annos_list.append(snake_case_ )
with open(F"""{file_root}.txt""" , """w""" ) as outfile:
outfile.write("""\n""".join(line for line in annos_list ) )
def __UpperCAmelCase ( snake_case_ : str , snake_case_ : str ) -> tuple[list, list]:
"""simple docstring"""
_lowerCAmelCase = []
_lowerCAmelCase = []
for label_file in glob.glob(os.path.join(snake_case_ , """*.txt""" ) ):
_lowerCAmelCase = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
with open(snake_case_ ) as in_file:
_lowerCAmelCase = in_file.readlines()
_lowerCAmelCase = os.path.join(snake_case_ , F"""{label_name}.jpg""" )
_lowerCAmelCase = []
for obj_list in obj_lists:
_lowerCAmelCase = obj_list.rstrip("""\n""" ).split(""" """ )
_lowerCAmelCase = float(obj[1] ) - float(obj[3] ) / 2
_lowerCAmelCase = float(obj[2] ) - float(obj[4] ) / 2
_lowerCAmelCase = float(obj[1] ) + float(obj[3] ) / 2
_lowerCAmelCase = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(snake_case_ )
labels.append(snake_case_ )
return img_paths, labels
def __UpperCAmelCase ( snake_case_ : list , snake_case_ : list , snake_case_ : list[int] , snake_case_ : tuple[int, int] , snake_case_ : tuple[float, float] , snake_case_ : float = 0.0 , ) -> tuple[list, list, str]:
"""simple docstring"""
_lowerCAmelCase = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
_lowerCAmelCase = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
_lowerCAmelCase = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
_lowerCAmelCase = int(scale_x * output_size[1] )
_lowerCAmelCase = int(scale_y * output_size[0] )
_lowerCAmelCase = []
_lowerCAmelCase = []
for i, index in enumerate(snake_case_ ):
_lowerCAmelCase = all_img_list[index]
path_list.append(snake_case_ )
_lowerCAmelCase = all_annos[index]
_lowerCAmelCase = cva.imread(snake_case_ )
if i == 0: # top-left
_lowerCAmelCase = cva.resize(snake_case_ , (divid_point_x, divid_point_y) )
_lowerCAmelCase = img
for bbox in img_annos:
_lowerCAmelCase = bbox[1] * scale_x
_lowerCAmelCase = bbox[2] * scale_y
_lowerCAmelCase = bbox[3] * scale_x
_lowerCAmelCase = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
_lowerCAmelCase = cva.resize(snake_case_ , (output_size[1] - divid_point_x, divid_point_y) )
_lowerCAmelCase = img
for bbox in img_annos:
_lowerCAmelCase = scale_x + bbox[1] * (1 - scale_x)
_lowerCAmelCase = bbox[2] * scale_y
_lowerCAmelCase = scale_x + bbox[3] * (1 - scale_x)
_lowerCAmelCase = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
_lowerCAmelCase = cva.resize(snake_case_ , (divid_point_x, output_size[0] - divid_point_y) )
_lowerCAmelCase = img
for bbox in img_annos:
_lowerCAmelCase = bbox[1] * scale_x
_lowerCAmelCase = scale_y + bbox[2] * (1 - scale_y)
_lowerCAmelCase = bbox[3] * scale_x
_lowerCAmelCase = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
_lowerCAmelCase = cva.resize(
snake_case_ , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
_lowerCAmelCase = img
for bbox in img_annos:
_lowerCAmelCase = scale_x + bbox[1] * (1 - scale_x)
_lowerCAmelCase = scale_y + bbox[2] * (1 - scale_y)
_lowerCAmelCase = scale_x + bbox[3] * (1 - scale_x)
_lowerCAmelCase = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding boxes smaller than the filter scale
if filter_scale > 0:
_lowerCAmelCase = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def __UpperCAmelCase ( snake_case_ : int ) -> str:
"""simple docstring"""
assert number_char > 1, "The number of characters should be greater than 1"
_lowerCAmelCase = ascii_lowercase + digits
return "".join(random.choice(snake_case_ ) for _ in range(snake_case_ ) )
if __name__ == "__main__":
main()
print('''DONE ✅''')
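# Hedged helpers restating the coordinate conversions used above: YOLO labels
# store normalized (x_center, y_center, width, height) while the mosaic code
# works with corner coordinates. Names are illustrative only.
def yolo_to_corners(cx: float, cy: float, w: float, h: float) -> tuple:
    return cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2

def corners_to_yolo(xmin: float, ymin: float, xmax: float, ymax: float) -> tuple:
    w, h = xmax - xmin, ymax - ymin
    return xmin + w / 2, ymin + h / 2, w, h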
| 317
| 1
|
"""simple docstring"""
from __future__ import annotations
from typing import TypedDict
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = 42
__UpperCamelCase = 42
def __UpperCAmelCase ( snake_case_ : str ) -> list[str]:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ):
raise TypeError("""The parameter s type must be str.""" )
return [s[i:] + s[:i] for i in range(len(snake_case_ ) )]
def __UpperCAmelCase ( snake_case_ : str ) -> BWTTransformDict:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ):
raise TypeError("""The parameter s type must be str.""" )
if not s:
raise ValueError("""The parameter s must not be empty.""" )
_lowerCAmelCase = all_rotations(snake_case_ )
rotations.sort() # sort the list of rotations in alphabetical order
# make a string composed of the last char of each rotation
_lowerCAmelCase = {
"bwt_string": "".join([word[-1] for word in rotations] ),
"idx_original_string": rotations.index(snake_case_ ),
}
return response
def __UpperCAmelCase ( snake_case_ : str , snake_case_ : int ) -> str:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ):
raise TypeError("""The parameter bwt_string type must be str.""" )
if not bwt_string:
raise ValueError("""The parameter bwt_string must not be empty.""" )
try:
_lowerCAmelCase = int(snake_case_ )
except ValueError:
raise TypeError(
"""The parameter idx_original_string must be an int or a value"""
""" that can be cast to int.""" )
if idx_original_string < 0:
raise ValueError("""The parameter idx_original_string must not be lower than 0.""" )
if idx_original_string >= len(snake_case_ ):
raise ValueError(
"""The parameter idx_original_string must be lower than""" """ len(bwt_string).""" )
_lowerCAmelCase = [""""""] * len(snake_case_ )
for _ in range(len(snake_case_ ) ):
for i in range(len(snake_case_ ) ):
_lowerCAmelCase = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : int = '''Provide a string that I will generate its BWT transform: '''
SCREAMING_SNAKE_CASE : Optional[int] = input(entry_msg).strip()
SCREAMING_SNAKE_CASE : Tuple = bwt_transform(s)
print(
F'Burrows Wheeler transform for string \'{s}\' results '
F'in \'{result["bwt_string"]}\''
)
SCREAMING_SNAKE_CASE : int = reverse_bwt(result['''bwt_string'''], result['''idx_original_string'''])
print(
F'Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' '
F'we get original string \'{original_string}\''
)
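# Hedged round-trip sanity check (assuming `bwt_transform` / `reverse_bwt`
# resolve to the functions defined above, as in the demo):
def bwt_round_trip_ok(s: str) -> bool:
    res = bwt_transform(s)
    return reverse_bwt(res["bwt_string"], res["idx_original_string"]) == s

# bwt_round_trip_ok("banana")  # -> True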
| 317
|
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
SCREAMING_SNAKE_CASE : Dict = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def __UpperCAmelCase ( snake_case_ : Optional[int] ) -> List[str]:
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(snake_case_ )
def __UpperCAmelCase ( snake_case_ : Union[str, Any] ) -> int:
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_terminal_summary_main
_lowerCAmelCase = terminalreporter.config.getoption("""--make-reports""" )
if make_reports:
pytest_terminal_summary_main(snake_case_ , id=snake_case_ )
| 317
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE : int = {
'''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''],
'''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''],
'''processing_mctct''': ['''MCTCTProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : List[Any] = [
'''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MCTCTForCTC''',
'''MCTCTModel''',
'''MCTCTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 317
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
SCREAMING_SNAKE_CASE : Optional[Any] = {
'''Acehnese Arabic''': '''ace_Arab''',
'''Acehnese Latin''': '''ace_Latn''',
'''Mesopotamian Arabic''': '''acm_Arab''',
'''Ta\'izzi-Adeni Arabic''': '''acq_Arab''',
'''Tunisian Arabic''': '''aeb_Arab''',
'''Afrikaans''': '''afr_Latn''',
'''South Levantine Arabic''': '''ajp_Arab''',
'''Akan''': '''aka_Latn''',
'''Amharic''': '''amh_Ethi''',
'''North Levantine Arabic''': '''apc_Arab''',
'''Modern Standard Arabic''': '''arb_Arab''',
'''Modern Standard Arabic Romanized''': '''arb_Latn''',
'''Najdi Arabic''': '''ars_Arab''',
'''Moroccan Arabic''': '''ary_Arab''',
'''Egyptian Arabic''': '''arz_Arab''',
'''Assamese''': '''asm_Beng''',
'''Asturian''': '''ast_Latn''',
'''Awadhi''': '''awa_Deva''',
'''Central Aymara''': '''ayr_Latn''',
'''South Azerbaijani''': '''azb_Arab''',
'''North Azerbaijani''': '''azj_Latn''',
'''Bashkir''': '''bak_Cyrl''',
'''Bambara''': '''bam_Latn''',
'''Balinese''': '''ban_Latn''',
'''Belarusian''': '''bel_Cyrl''',
'''Bemba''': '''bem_Latn''',
'''Bengali''': '''ben_Beng''',
'''Bhojpuri''': '''bho_Deva''',
'''Banjar Arabic''': '''bjn_Arab''',
'''Banjar Latin''': '''bjn_Latn''',
'''Standard Tibetan''': '''bod_Tibt''',
'''Bosnian''': '''bos_Latn''',
'''Buginese''': '''bug_Latn''',
'''Bulgarian''': '''bul_Cyrl''',
'''Catalan''': '''cat_Latn''',
'''Cebuano''': '''ceb_Latn''',
'''Czech''': '''ces_Latn''',
'''Chokwe''': '''cjk_Latn''',
'''Central Kurdish''': '''ckb_Arab''',
'''Crimean Tatar''': '''crh_Latn''',
'''Welsh''': '''cym_Latn''',
'''Danish''': '''dan_Latn''',
'''German''': '''deu_Latn''',
'''Southwestern Dinka''': '''dik_Latn''',
'''Dyula''': '''dyu_Latn''',
'''Dzongkha''': '''dzo_Tibt''',
'''Greek''': '''ell_Grek''',
'''English''': '''eng_Latn''',
'''Esperanto''': '''epo_Latn''',
'''Estonian''': '''est_Latn''',
'''Basque''': '''eus_Latn''',
'''Ewe''': '''ewe_Latn''',
'''Faroese''': '''fao_Latn''',
'''Fijian''': '''fij_Latn''',
'''Finnish''': '''fin_Latn''',
'''Fon''': '''fon_Latn''',
'''French''': '''fra_Latn''',
'''Friulian''': '''fur_Latn''',
'''Nigerian Fulfulde''': '''fuv_Latn''',
'''Scottish Gaelic''': '''gla_Latn''',
'''Irish''': '''gle_Latn''',
'''Galician''': '''glg_Latn''',
'''Guarani''': '''grn_Latn''',
'''Gujarati''': '''guj_Gujr''',
'''Haitian Creole''': '''hat_Latn''',
'''Hausa''': '''hau_Latn''',
'''Hebrew''': '''heb_Hebr''',
'''Hindi''': '''hin_Deva''',
'''Chhattisgarhi''': '''hne_Deva''',
'''Croatian''': '''hrv_Latn''',
'''Hungarian''': '''hun_Latn''',
'''Armenian''': '''hye_Armn''',
'''Igbo''': '''ibo_Latn''',
'''Ilocano''': '''ilo_Latn''',
'''Indonesian''': '''ind_Latn''',
'''Icelandic''': '''isl_Latn''',
'''Italian''': '''ita_Latn''',
'''Javanese''': '''jav_Latn''',
'''Japanese''': '''jpn_Jpan''',
'''Kabyle''': '''kab_Latn''',
'''Jingpho''': '''kac_Latn''',
'''Kamba''': '''kam_Latn''',
'''Kannada''': '''kan_Knda''',
'''Kashmiri Arabic''': '''kas_Arab''',
'''Kashmiri Devanagari''': '''kas_Deva''',
'''Georgian''': '''kat_Geor''',
'''Central Kanuri Arabic''': '''knc_Arab''',
'''Central Kanuri Latin''': '''knc_Latn''',
'''Kazakh''': '''kaz_Cyrl''',
'''Kabiyè''': '''kbp_Latn''',
'''Kabuverdianu''': '''kea_Latn''',
'''Khmer''': '''khm_Khmr''',
'''Kikuyu''': '''kik_Latn''',
'''Kinyarwanda''': '''kin_Latn''',
'''Kyrgyz''': '''kir_Cyrl''',
'''Kimbundu''': '''kmb_Latn''',
'''Northern Kurdish''': '''kmr_Latn''',
'''Kikongo''': '''kon_Latn''',
'''Korean''': '''kor_Hang''',
'''Lao''': '''lao_Laoo''',
'''Ligurian''': '''lij_Latn''',
'''Limburgish''': '''lim_Latn''',
'''Lingala''': '''lin_Latn''',
'''Lithuanian''': '''lit_Latn''',
'''Lombard''': '''lmo_Latn''',
'''Latgalian''': '''ltg_Latn''',
'''Luxembourgish''': '''ltz_Latn''',
'''Luba-Kasai''': '''lua_Latn''',
'''Ganda''': '''lug_Latn''',
'''Luo''': '''luo_Latn''',
'''Mizo''': '''lus_Latn''',
'''Standard Latvian''': '''lvs_Latn''',
'''Magahi''': '''mag_Deva''',
'''Maithili''': '''mai_Deva''',
'''Malayalam''': '''mal_Mlym''',
'''Marathi''': '''mar_Deva''',
'''Minangkabau Arabic''': '''min_Arab''',
'''Minangkabau Latin''': '''min_Latn''',
'''Macedonian''': '''mkd_Cyrl''',
'''Plateau Malagasy''': '''plt_Latn''',
'''Maltese''': '''mlt_Latn''',
'''Meitei Bengali''': '''mni_Beng''',
'''Halh Mongolian''': '''khk_Cyrl''',
'''Mossi''': '''mos_Latn''',
'''Maori''': '''mri_Latn''',
'''Burmese''': '''mya_Mymr''',
'''Dutch''': '''nld_Latn''',
'''Norwegian Nynorsk''': '''nno_Latn''',
'''Norwegian Bokmål''': '''nob_Latn''',
'''Nepali''': '''npi_Deva''',
'''Northern Sotho''': '''nso_Latn''',
'''Nuer''': '''nus_Latn''',
'''Nyanja''': '''nya_Latn''',
'''Occitan''': '''oci_Latn''',
'''West Central Oromo''': '''gaz_Latn''',
'''Odia''': '''ory_Orya''',
'''Pangasinan''': '''pag_Latn''',
'''Eastern Panjabi''': '''pan_Guru''',
'''Papiamento''': '''pap_Latn''',
'''Western Persian''': '''pes_Arab''',
'''Polish''': '''pol_Latn''',
'''Portuguese''': '''por_Latn''',
'''Dari''': '''prs_Arab''',
'''Southern Pashto''': '''pbt_Arab''',
'''Ayacucho Quechua''': '''quy_Latn''',
'''Romanian''': '''ron_Latn''',
'''Rundi''': '''run_Latn''',
'''Russian''': '''rus_Cyrl''',
'''Sango''': '''sag_Latn''',
'''Sanskrit''': '''san_Deva''',
'''Santali''': '''sat_Olck''',
'''Sicilian''': '''scn_Latn''',
'''Shan''': '''shn_Mymr''',
'''Sinhala''': '''sin_Sinh''',
'''Slovak''': '''slk_Latn''',
'''Slovenian''': '''slv_Latn''',
'''Samoan''': '''smo_Latn''',
'''Shona''': '''sna_Latn''',
'''Sindhi''': '''snd_Arab''',
'''Somali''': '''som_Latn''',
'''Southern Sotho''': '''sot_Latn''',
'''Spanish''': '''spa_Latn''',
'''Tosk Albanian''': '''als_Latn''',
'''Sardinian''': '''srd_Latn''',
'''Serbian''': '''srp_Cyrl''',
'''Swati''': '''ssw_Latn''',
'''Sundanese''': '''sun_Latn''',
'''Swedish''': '''swe_Latn''',
'''Swahili''': '''swh_Latn''',
'''Silesian''': '''szl_Latn''',
'''Tamil''': '''tam_Taml''',
'''Tatar''': '''tat_Cyrl''',
'''Telugu''': '''tel_Telu''',
'''Tajik''': '''tgk_Cyrl''',
'''Tagalog''': '''tgl_Latn''',
'''Thai''': '''tha_Thai''',
'''Tigrinya''': '''tir_Ethi''',
'''Tamasheq Latin''': '''taq_Latn''',
'''Tamasheq Tifinagh''': '''taq_Tfng''',
'''Tok Pisin''': '''tpi_Latn''',
'''Tswana''': '''tsn_Latn''',
'''Tsonga''': '''tso_Latn''',
'''Turkmen''': '''tuk_Latn''',
'''Tumbuka''': '''tum_Latn''',
'''Turkish''': '''tur_Latn''',
'''Twi''': '''twi_Latn''',
'''Central Atlas Tamazight''': '''tzm_Tfng''',
'''Uyghur''': '''uig_Arab''',
'''Ukrainian''': '''ukr_Cyrl''',
'''Umbundu''': '''umb_Latn''',
'''Urdu''': '''urd_Arab''',
'''Northern Uzbek''': '''uzn_Latn''',
'''Venetian''': '''vec_Latn''',
'''Vietnamese''': '''vie_Latn''',
'''Waray''': '''war_Latn''',
'''Wolof''': '''wol_Latn''',
'''Xhosa''': '''xho_Latn''',
'''Eastern Yiddish''': '''ydd_Hebr''',
'''Yoruba''': '''yor_Latn''',
'''Yue Chinese''': '''yue_Hant''',
'''Chinese Simplified''': '''zho_Hans''',
'''Chinese Traditional''': '''zho_Hant''',
'''Standard Malay''': '''zsm_Latn''',
'''Zulu''': '''zul_Latn''',
}
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = 'facebook/nllb-200-distilled-600M'
__UpperCamelCase = (
'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
'which should be the desired output language. Both `src_lang` and `tgt_lang` are written in '
'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
)
__UpperCamelCase = 'translator'
__UpperCamelCase = AutoTokenizer
__UpperCamelCase = AutoModelForSeqaSeqLM
__UpperCamelCase = LANGUAGE_CODES
__UpperCamelCase = ['text', 'text', 'text']
__UpperCamelCase = ['text']
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if src_lang not in self.lang_to_code:
raise ValueError(f"""{src_lang} is not a supported language.""" )
if tgt_lang not in self.lang_to_code:
raise ValueError(f"""{tgt_lang} is not a supported language.""" )
_lowerCAmelCase = self.lang_to_code[src_lang]
_lowerCAmelCase = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
lowerCamelCase , return_tensors="""pt""" , src_lang=lowerCamelCase , tgt_lang=lowerCamelCase )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
return self.model.generate(**lowerCamelCase )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=lowerCamelCase )
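# Hedged usage sketch: per the ['text', 'text', 'text'] inputs declaration
# above, the tool is expected to take three text arguments; the instantiation
# below is hypothetical and depends on the surrounding agents runtime.
# tool = TranslationTool()
# tool("Bonjour", src_lang="French", tgt_lang="English")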
| 317
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
SCREAMING_SNAKE_CASE : List[str] = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = ['''ViTFeatureExtractor''']
SCREAMING_SNAKE_CASE : Dict = ['''ViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Tuple = [
'''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTForImageClassification''',
'''ViTForMaskedImageModeling''',
'''ViTModel''',
'''ViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : List[str] = [
'''TFViTForImageClassification''',
'''TFViTModel''',
'''TFViTPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : List[str] = [
'''FlaxViTForImageClassification''',
'''FlaxViTModel''',
'''FlaxViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 317
|
"""simple docstring"""
from math import isqrt
def __UpperCAmelCase ( snake_case_ : int ) -> list[int]:
"""simple docstring"""
_lowerCAmelCase = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
            for j in range(i**2 , snake_case_ , i ):
_lowerCAmelCase = False
return [i for i in range(2 , snake_case_ ) if is_prime[i]]
def __UpperCAmelCase ( snake_case_ : int = 10**8 ) -> int:
"""simple docstring"""
_lowerCAmelCase = calculate_prime_numbers(max_number // 2 )
_lowerCAmelCase = 0
_lowerCAmelCase = 0
_lowerCAmelCase = len(snake_case_ ) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
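# Hand-checked trace of the two-pointer count above (not part of the original
# solution): for max_number = 30 the sieve over 30 // 2 = 15 yields
# [2, 3, 5, 7, 11, 13], and the scan adds 6 + 3 + 1 = 10 semiprimes below 30,
# namely 4, 6, 9, 10, 14, 15, 21, 22, 25, 26.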
if __name__ == "__main__":
print(F'{solution() = }')
| 317
| 1
|
"""simple docstring"""
def __UpperCAmelCase ( snake_case_ : Optional[int] , snake_case_ : Any , snake_case_ : bool=False ) -> Dict:
"""simple docstring"""
if isinstance(snake_case_ , snake_case_ ) and isinstance(snake_case_ , snake_case_ ):
_lowerCAmelCase = len(set_a.intersection(snake_case_ ) )
if alternative_union:
_lowerCAmelCase = len(snake_case_ ) + len(snake_case_ )
else:
_lowerCAmelCase = len(set_a.union(snake_case_ ) )
return intersection / union
if isinstance(snake_case_ , (list, tuple) ) and isinstance(snake_case_ , (list, tuple) ):
_lowerCAmelCase = [element for element in set_a if element in set_b]
if alternative_union:
_lowerCAmelCase = len(snake_case_ ) + len(snake_case_ )
return len(snake_case_ ) / union
else:
_lowerCAmelCase = set_a + [element for element in set_b if element not in set_a]
return len(snake_case_ ) / len(snake_case_ )
return None
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : int = {'''a''', '''b''', '''c''', '''d''', '''e'''}
SCREAMING_SNAKE_CASE : Union[str, Any] = {'''c''', '''d''', '''e''', '''f''', '''h''', '''i'''}
print(jaccard_similarity(set_a, set_b))
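# Expected output for the sample sets above: the intersection {'c', 'd', 'e'}
# has size 3 and the union has size 8, so 0.375 is printed. With
# alternative_union=True the denominator would be len(set_a) + len(set_b) = 11,
# giving 3 / 11 ≈ 0.2727 instead.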
| 317
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = (
'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'
'It takes two arguments named `image` which should be the original image, and `label` which should be a text '
'describing the elements what should be identified in the segmentation mask. The tool returns the mask.'
)
__UpperCamelCase = 'CIDAS/clipseg-rd64-refined'
__UpperCamelCase = 'image_segmenter'
__UpperCamelCase = CLIPSegForImageSegmentation
__UpperCamelCase = ['image', 'text']
__UpperCamelCase = ['image']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""vision"""] )
super().__init__(*lowerCamelCase , **lowerCamelCase )
def A__ (self , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
return self.pre_processor(text=[label] , images=[image] , padding=lowerCamelCase , return_tensors="""pt""" )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
with torch.no_grad():
_lowerCAmelCase = self.model(**lowerCamelCase ).logits
return logits
def A__ (self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = outputs.cpu().detach().numpy()
_lowerCAmelCase = 0
_lowerCAmelCase = 1
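        # Note: in the upstream transformers implementation these two
        # assignments threshold the logits in place (array[array <= 0] = 0,
        # then array[array > 0] = 1), so the image returned below is a binary
        # mask scaled to the 8-bit range.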
        return Image.fromarray((array * 255).astype(np.uint8 ) )
| 317
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __lowerCamelCase ( unittest.TestCase ):
def __init__(self , lowerCamelCase , lowerCamelCase=7 , lowerCamelCase=3 , lowerCamelCase=18 , lowerCamelCase=30 , lowerCamelCase=400 , lowerCamelCase=True , lowerCamelCase=None , lowerCamelCase=True , lowerCamelCase=None , ):
'''simple docstring'''
_lowerCAmelCase = size if size is not None else {"""shortest_edge""": 20}
_lowerCAmelCase = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = num_channels
_lowerCAmelCase = image_size
_lowerCAmelCase = min_resolution
_lowerCAmelCase = max_resolution
_lowerCAmelCase = do_resize
_lowerCAmelCase = size
_lowerCAmelCase = do_center_crop
_lowerCAmelCase = crop_size
def A__ (self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __lowerCamelCase ( __lowercase , unittest.TestCase ):
__UpperCamelCase = MobileNetVaImageProcessor if is_vision_available() else None
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = MobileNetVaImageProcessingTester(self )
@property
def A__ (self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase , """size""" ) )
self.assertTrue(hasattr(lowerCamelCase , """do_center_crop""" ) )
self.assertTrue(hasattr(lowerCamelCase , """crop_size""" ) )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 20} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
_lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def A__ (self ):
'''simple docstring'''
pass
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , Image.Image )
# Test not batched input
_lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_lowerCAmelCase = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , np.ndarray )
# Test not batched input
_lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_lowerCAmelCase = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , torch.Tensor )
# Test not batched input
_lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_lowerCAmelCase = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
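# Design note: the three tests above exercise the same contract for PIL,
# numpy, and torch inputs alike: a single image gains a batch dimension of 1,
# batched inputs keep batch_size, and the spatial dims always equal crop_size
# after the resize + center-crop path.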
| 317
|
"""simple docstring"""
from __future__ import annotations
import queue
class __lowerCamelCase :
def __init__(self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = data
_lowerCAmelCase = None
_lowerCAmelCase = None
def __UpperCAmelCase ( ) -> TreeNode:
"""simple docstring"""
print("""\n********Press N to stop entering at any point of time********\n""" )
_lowerCAmelCase = input("""Enter the value of the root node: """ ).strip().lower()
_lowerCAmelCase = queue.Queue()
_lowerCAmelCase = TreeNode(int(snake_case_ ) )
q.put(snake_case_ )
while not q.empty():
_lowerCAmelCase = q.get()
_lowerCAmelCase = F"""Enter the left node of {node_found.data}: """
_lowerCAmelCase = input(snake_case_ ).strip().lower() or """n"""
if check == "n":
return tree_node
_lowerCAmelCase = TreeNode(int(snake_case_ ) )
_lowerCAmelCase = left_node
q.put(snake_case_ )
_lowerCAmelCase = F"""Enter the right node of {node_found.data}: """
_lowerCAmelCase = input(snake_case_ ).strip().lower() or """n"""
if check == "n":
return tree_node
_lowerCAmelCase = TreeNode(int(snake_case_ ) )
_lowerCAmelCase = right_node
q.put(snake_case_ )
raise
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
print(node.data , end=""",""" )
pre_order(node.left )
pre_order(node.right )
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
in_order(node.left )
print(node.data , end=""",""" )
in_order(node.right )
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=""",""" )
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
_lowerCAmelCase = queue.Queue()
q.put(snake_case_ )
while not q.empty():
_lowerCAmelCase = q.get()
print(node_dequeued.data , end=""",""" )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
_lowerCAmelCase = queue.Queue()
q.put(snake_case_ )
while not q.empty():
_lowerCAmelCase = []
while not q.empty():
_lowerCAmelCase = q.get()
print(node_dequeued.data , end=""",""" )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(snake_case_ )
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
_lowerCAmelCase = []
_lowerCAmelCase = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=""",""" )
stack.append(snake_case_ )
_lowerCAmelCase = n.left
# end of while means current node doesn't have left child
_lowerCAmelCase = stack.pop()
# start to traverse its right child
_lowerCAmelCase = n.right
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
_lowerCAmelCase = []
_lowerCAmelCase = node
while n or stack:
while n:
stack.append(snake_case_ )
_lowerCAmelCase = n.left
_lowerCAmelCase = stack.pop()
print(n.data , end=""",""" )
_lowerCAmelCase = n.right
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
_lowerCAmelCase , _lowerCAmelCase = [], []
_lowerCAmelCase = node
    stack1.append(snake_case_ )
    while stack1:  # to find the reversed order of post order, store it in stack2
        _lowerCAmelCase = stack1.pop()
        if n.left:
            stack1.append(n.left )
        if n.right:
            stack1.append(n.right )
        stack2.append(snake_case_ )
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data , end=""",""" )
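# Why the two-stack post-order works: draining stack1 while pushing left then
# right children emits nodes in (root, right, left) order onto stack2, and
# popping stack2 reverses that into (left, right, root), which is exactly
# post-order.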
def __UpperCAmelCase ( snake_case_ : str = "" , snake_case_ : int=50 , snake_case_ : str="*" ) -> str:
"""simple docstring"""
if not s:
return "\n" + width * char
_lowerCAmelCase , _lowerCAmelCase = divmod(width - len(snake_case_ ) - 2 , 2 )
return F"""{left * char} {s} {(left + extra) * char}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('''Binary Tree Traversals'''))
SCREAMING_SNAKE_CASE : TreeNode = build_tree()
print(prompt('''Pre Order Traversal'''))
pre_order(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal'''))
in_order(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal'''))
post_order(node)
print(prompt() + '''\n''')
print(prompt('''Level Order Traversal'''))
level_order(node)
print(prompt() + '''\n''')
print(prompt('''Actual Level Order Traversal'''))
level_order_actual(node)
print('''*''' * 5_0 + '''\n''')
print(prompt('''Pre Order Traversal - Iteration Version'''))
pre_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal - Iteration Version'''))
in_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal - Iteration Version'''))
post_order_iter(node)
print(prompt())
| 317
| 1
|
"""simple docstring"""
import requests
def __UpperCAmelCase ( snake_case_ : str , snake_case_ : str ) -> None:
"""simple docstring"""
_lowerCAmelCase = {"""Content-Type""": """application/json"""}
_lowerCAmelCase = requests.post(snake_case_ , json={"""text""": message_body} , headers=snake_case_ )
if response.status_code != 200:
_lowerCAmelCase = (
"""Request to slack returned an error """
F"""{response.status_code}, the response is:\n{response.text}"""
)
raise ValueError(snake_case_ )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('''<YOUR MESSAGE BODY>''', '''<SLACK CHANNEL URL>''')
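# Behavior note: the webhook expects a JSON body of the form {"text": ...}; a
# 200 response returns None silently, while any other status raises ValueError
# carrying the status code and response body for debugging.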
| 317
|
"""simple docstring"""
from __future__ import annotations
class __lowerCamelCase :
def __init__(self , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase = text, pattern
_lowerCAmelCase , _lowerCAmelCase = len(lowerCamelCase ), len(lowerCamelCase )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def A__ (self , lowerCamelCase ):
'''simple docstring'''
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = []
for i in range(self.textLen - self.patLen + 1 ):
_lowerCAmelCase = self.mismatch_in_text(lowerCamelCase )
if mismatch_index == -1:
positions.append(lowerCamelCase )
else:
_lowerCAmelCase = self.match_in_pattern(self.text[mismatch_index] )
_lowerCAmelCase = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
SCREAMING_SNAKE_CASE : Any = '''ABAABA'''
SCREAMING_SNAKE_CASE : Optional[int] = '''AB'''
SCREAMING_SNAKE_CASE : str = BoyerMooreSearch(text, pattern)
SCREAMING_SNAKE_CASE : Tuple = bms.bad_character_heuristic()
if len(positions) == 0:
print('''No match found''')
else:
print('''Pattern found in following positions: ''')
print(positions)
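# Hand-checked expectation for the sample above: "AB" occurs in "ABAABA" at
# indices 0 and 3, so the script prints "Pattern found in following
# positions: " followed by [0, 3].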
| 317
| 1
|
"""simple docstring"""
import pytest
SCREAMING_SNAKE_CASE : Optional[Any] = '''__dummy_dataset1__'''
SCREAMING_SNAKE_CASE : List[str] = '''
import json
import os
import datasets
REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"O",
"B-PER",
"I-PER",
"B-ORG",
"I-ORG",
"B-LOC",
"I-LOC",
]
)
),
"langs": datasets.Sequence(datasets.Value("string")),
"spans": datasets.Sequence(datasets.Value("string")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
]
def _generate_examples(self, filepath):
with open(filepath, "r", encoding="utf-8") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
'''
@pytest.fixture
def __UpperCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def __UpperCAmelCase ( ) -> List[Any]:
"""simple docstring"""
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def __UpperCAmelCase ( snake_case_ : Union[str, Any] , snake_case_ : List[Any] , snake_case_ : List[Any] ) -> List[Any]:
"""simple docstring"""
_lowerCAmelCase = dataset_loading_script_name
_lowerCAmelCase = tmp_path / """datasets""" / script_name
script_dir.mkdir(parents=snake_case_ )
_lowerCAmelCase = script_dir / F"""{script_name}.py"""
with open(snake_case_ , """w""" ) as f:
f.write(snake_case_ )
return str(snake_case_ )
| 317
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
SCREAMING_SNAKE_CASE : List[str] = False
class __lowerCamelCase ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = VersatileDiffusionImageVariationPipeline.from_pretrained("""shi-labs/versatile-diffusion""" )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
_lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
_lowerCAmelCase = torch.manual_seed(0 )
_lowerCAmelCase = pipe(
image=lowerCamelCase , generator=lowerCamelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
_lowerCAmelCase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 317
| 1
|
"""simple docstring"""
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
SCREAMING_SNAKE_CASE : int = {
# 1536-bit
5: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF''',
base=1_6,
),
'''generator''': 2,
},
# 2048-bit
1_4: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AACAA68FFFFFFFFFFFFFFFF''',
base=1_6,
),
'''generator''': 2,
},
# 3072-bit
1_5: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF''',
base=1_6,
),
'''generator''': 2,
},
# 4096-bit
1_6: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'''
+ '''FFFFFFFFFFFFFFFF''',
base=1_6,
),
'''generator''': 2,
},
# 6144-bit
1_7: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'''
+ '''8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'''
+ '''302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'''
+ '''A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'''
+ '''49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'''
+ '''FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'''
+ '''180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'''
+ '''3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'''
+ '''04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'''
+ '''B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'''
+ '''1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'''
+ '''E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'''
+ '''99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'''
+ '''04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'''
+ '''233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'''
+ '''D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'''
+ '''AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'''
+ '''DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'''
+ '''2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'''
+ '''F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'''
+ '''BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'''
+ '''B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'''
+ '''387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'''
+ '''6DCC4024FFFFFFFFFFFFFFFF''',
base=1_6,
),
'''generator''': 2,
},
# 8192-bit
1_8: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'''
+ '''F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'''
+ '''179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'''
+ '''DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'''
+ '''5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'''
+ '''D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'''
+ '''23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'''
+ '''06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'''
+ '''DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'''
+ '''12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'''
+ '''38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'''
+ '''741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'''
+ '''3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'''
+ '''22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'''
+ '''4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'''
+ '''062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'''
+ '''4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'''
+ '''B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'''
+ '''4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'''
+ '''9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'''
+ '''60C980DD98EDD3DFFFFFFFFFFFFFFFFF''',
base=1_6,
),
'''generator''': 2,
},
}
class __lowerCamelCase :
def __init__(self , lowerCamelCase = 14 ):
'''simple docstring'''
if group not in primes:
raise ValueError("""Unsupported Group""" )
_lowerCAmelCase = primes[group]["""prime"""]
_lowerCAmelCase = primes[group]["""generator"""]
_lowerCAmelCase = int(hexlify(urandom(32 ) ) , base=16 )
def A__ (self ):
'''simple docstring'''
return hex(self.__private_key )[2:]
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = pow(self.generator , self.__private_key , self.prime )
return hex(lowerCamelCase )[2:]
def A__ (self , lowerCamelCase ):
'''simple docstring'''
return (
2 <= key <= self.prime - 2
and pow(lowerCamelCase , (self.prime - 1) // 2 , self.prime ) == 1
)
def A__ (self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = int(lowerCamelCase , base=16 )
if not self.is_valid_public_key(lowerCamelCase ):
raise ValueError("""Invalid public key""" )
_lowerCAmelCase = pow(lowerCamelCase , self.__private_key , self.prime )
        return sha256(str(lowerCamelCase ).encode() ).hexdigest()
@staticmethod
def A__ (lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
return (
2 <= remote_public_key_str <= prime - 2
and pow(lowerCamelCase , (prime - 1) // 2 , lowerCamelCase ) == 1
)
@staticmethod
def A__ (lowerCamelCase , lowerCamelCase , lowerCamelCase = 14 ):
'''simple docstring'''
_lowerCAmelCase = int(lowerCamelCase , base=16 )
_lowerCAmelCase = int(lowerCamelCase , base=16 )
_lowerCAmelCase = primes[group]["""prime"""]
if not DiffieHellman.is_valid_public_key_static(lowerCamelCase , lowerCamelCase ):
raise ValueError("""Invalid public key""" )
_lowerCAmelCase = pow(lowerCamelCase , lowerCamelCase , lowerCamelCase )
        return sha256(str(lowerCamelCase ).encode() ).hexdigest()
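# Hedged usage sketch (assuming the obfuscated methods above correspond to
# generate_public_key / generate_shared_key in the original implementation,
# and the class resolves to DiffieHellman, the name its own static method
# references):
#
#     alice, bob = DiffieHellman(group=14), DiffieHellman(group=14)
#     shared_a = alice.generate_shared_key(bob.generate_public_key())
#     shared_b = bob.generate_shared_key(alice.generate_public_key())
#     assert shared_a == shared_b  # both sides derive the same SHA-256 digest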
if __name__ == "__main__":
import doctest
doctest.testmod()
| 317
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __lowerCamelCase ( __lowercase , unittest.TestCase ):
__UpperCamelCase = DiTPipeline
__UpperCamelCase = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
__UpperCamelCase = PipelineTesterMixin.required_optional_params - {
'latents',
'num_images_per_prompt',
'callback',
'callback_steps',
}
__UpperCamelCase = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
__UpperCamelCase = False
def A__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
        _lowerCAmelCase = Transformer2DModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=lowerCamelCase , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=1_000 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=lowerCamelCase , )
_lowerCAmelCase = AutoencoderKL()
_lowerCAmelCase = DDIMScheduler()
_lowerCAmelCase = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler}
return components
def A__ (self , lowerCamelCase , lowerCamelCase=0 ):
'''simple docstring'''
if str(lowerCamelCase ).startswith("""mps""" ):
_lowerCAmelCase = torch.manual_seed(lowerCamelCase )
else:
_lowerCAmelCase = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
_lowerCAmelCase = {
"""class_labels""": [1],
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = """cpu"""
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = self.pipeline_class(**lowerCamelCase )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
_lowerCAmelCase = self.get_dummy_inputs(lowerCamelCase )
_lowerCAmelCase = pipe(**lowerCamelCase ).images
_lowerCAmelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
_lowerCAmelCase = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
_lowerCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCamelCase , 1e-3 )
def A__ (self ):
'''simple docstring'''
self._test_inference_batch_single_identical(relax_max_difference=lowerCamelCase , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def A__ (self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class __lowerCamelCase ( unittest.TestCase ):
def A__ (self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = torch.manual_seed(0 )
_lowerCAmelCase = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" )
pipe.to("""cuda""" )
_lowerCAmelCase = ["""vase""", """umbrella""", """white shark""", """white wolf"""]
_lowerCAmelCase = pipe.get_label_ids(lowerCamelCase )
_lowerCAmelCase = pipe(lowerCamelCase , generator=lowerCamelCase , num_inference_steps=40 , output_type="""np""" ).images
for word, image in zip(lowerCamelCase , lowerCamelCase ):
_lowerCAmelCase = load_numpy(
f"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" )
assert np.abs((expected_image - image).max() ) < 1e-2
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" )
_lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("""cuda""" )
_lowerCAmelCase = ["""vase""", """umbrella"""]
_lowerCAmelCase = pipe.get_label_ids(lowerCamelCase )
_lowerCAmelCase = torch.manual_seed(0 )
_lowerCAmelCase = pipe(lowerCamelCase , generator=lowerCamelCase , num_inference_steps=25 , output_type="""np""" ).images
for word, image in zip(lowerCamelCase , lowerCamelCase ):
_lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
f"""/dit/{word}_512.npy""" )
assert np.abs((expected_image - image).max() ) < 1e-1
| 317
| 1
|
"""simple docstring"""
import math
import qiskit
def __UpperCAmelCase ( snake_case_ : int = 1 , snake_case_ : int = 1 , snake_case_ : int = 1 ) -> qiskit.result.counts.Counts:
"""simple docstring"""
if (
isinstance(snake_case_ , snake_case_ )
or isinstance(snake_case_ , snake_case_ )
or isinstance(snake_case_ , snake_case_ )
):
raise TypeError("""inputs must be integers.""" )
    if (input_a < 0) or (input_b < 0) or (carry_in < 0):
raise ValueError("""inputs must be positive.""" )
if (
(math.floor(snake_case_ ) != input_a)
        or (math.floor(snake_case_ ) != input_b)
or (math.floor(snake_case_ ) != carry_in)
):
raise ValueError("""inputs must be exact integers.""" )
    if (input_a > 2) or (input_b > 2) or (carry_in > 2):
raise ValueError("""inputs must be less or equal to 2.""" )
# build registers
_lowerCAmelCase = qiskit.QuantumRegister(4 , """qr""" )
_lowerCAmelCase = qiskit.ClassicalRegister(2 , """cr""" )
# list the entries
    _lowerCAmelCase = [input_a, input_b, carry_in]
_lowerCAmelCase = qiskit.QuantumCircuit(snake_case_ , snake_case_ )
for i in range(0 , 3 ):
if entry[i] == 2:
quantum_circuit.h(snake_case_ ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(snake_case_ ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(snake_case_ ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
quantum_circuit.measure([2, 3] , snake_case_ ) # measure the last two qbits
_lowerCAmelCase = qiskit.Aer.get_backend("""aer_simulator""" )
_lowerCAmelCase = qiskit.execute(snake_case_ , snake_case_ , shots=1000 )
return job.result().get_counts(snake_case_ )
if __name__ == "__main__":
print(F'Total sum count for state is: {quantum_full_adder(1, 1, 1)}')
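# Expected counts for the demo above: with inputs (1, 1, 1) the adder computes
# 1 + 1 + 1 = 0b11, i.e. sum = 1 and carry-out = 1; since no Hadamard entries
# are involved the result is deterministic and the simulator returns
# {'11': 1000} over the 1000 shots.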
| 317
|
"""simple docstring"""
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def __UpperCAmelCase ( snake_case_ : Union[str, Any] ) -> Dict:
"""simple docstring"""
return getitem, k
def __UpperCAmelCase ( snake_case_ : Dict , snake_case_ : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return setitem, k, v
def __UpperCAmelCase ( snake_case_ : str ) -> Optional[int]:
"""simple docstring"""
return delitem, k
def __UpperCAmelCase ( snake_case_ : Optional[Any] , snake_case_ : Tuple , *snake_case_ : Tuple ) -> str:
"""simple docstring"""
try:
return fun(snake_case_ , *snake_case_ ), None
except Exception as e:
return None, e
SCREAMING_SNAKE_CASE : int = (
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
)
SCREAMING_SNAKE_CASE : List[Any] = [
_set('''key_a''', '''val_a'''),
_set('''key_a''', '''val_b'''),
]
SCREAMING_SNAKE_CASE : Any = [
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
_del('''key_a'''),
_del('''key_b'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
]
SCREAMING_SNAKE_CASE : Union[str, Any] = [
_get('''key_a'''),
_del('''key_a'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
_del('''key_a'''),
_get('''key_a'''),
]
SCREAMING_SNAKE_CASE : Optional[Any] = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
SCREAMING_SNAKE_CASE : Optional[int] = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('''key_a''', '''val_b'''),
]
@pytest.mark.parametrize(
"""operations""" , (
pytest.param(_add_items , id="""add items""" ),
pytest.param(_overwrite_items , id="""overwrite items""" ),
pytest.param(_delete_items , id="""delete items""" ),
pytest.param(_access_absent_items , id="""access absent items""" ),
pytest.param(_add_with_resize_up , id="""add with resize up""" ),
pytest.param(_add_with_resize_down , id="""add with resize down""" ),
) , )
def __UpperCAmelCase ( snake_case_ : List[Any] ) -> Tuple:
"""simple docstring"""
_lowerCAmelCase = HashMap(initial_block_size=4 )
_lowerCAmelCase = {}
for _, (fun, *args) in enumerate(snake_case_ ):
_lowerCAmelCase , _lowerCAmelCase = _run_operation(snake_case_ , snake_case_ , *snake_case_ )
_lowerCAmelCase , _lowerCAmelCase = _run_operation(snake_case_ , snake_case_ , *snake_case_ )
assert my_res == py_res
assert str(snake_case_ ) == str(snake_case_ )
assert set(snake_case_ ) == set(snake_case_ )
assert len(snake_case_ ) == len(snake_case_ )
assert set(my.items() ) == set(py.items() )
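# Design note: each parametrized operation script is replayed against both the
# custom HashMap and a plain dict, and the test then asserts the two end
# states agree on repr, key set, length, and items: a lightweight model-based
# test with dict as the oracle.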
def __UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
    def is_public(name : str ) -> bool:
return not name.startswith("""_""" )
_lowerCAmelCase = {name for name in dir({} ) if is_public(snake_case_ )}
_lowerCAmelCase = {name for name in dir(HashMap() ) if is_public(snake_case_ )}
assert dict_public_names > hash_public_names
| 317
| 1
|
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
    AutoModelForSeq2SeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
SCREAMING_SNAKE_CASE : Optional[Any] = logging.getLogger(__name__)
@dataclass
class __lowerCamelCase :
__UpperCamelCase = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
__UpperCamelCase = field(
default=__lowercase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__UpperCamelCase = field(
default=__lowercase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
__UpperCamelCase = field(
default=__lowercase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
__UpperCamelCase = field(default=__lowercase , metadata={'help': 'Whether tp freeze the encoder.'} )
__UpperCamelCase = field(default=__lowercase , metadata={'help': 'Whether to freeze the embeddings.'} )
@dataclass
class __lowerCamelCase :
__UpperCamelCase = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
__UpperCamelCase = field(
default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , )
__UpperCamelCase = field(
default=1_024 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__UpperCamelCase = field(
default=128 , metadata={
'help': (
'The maximum total sequence length for target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__UpperCamelCase = field(
default=142 , metadata={
'help': (
'The maximum total sequence length for validation target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded. '
'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
'during ``evaluate`` and ``predict``.'
)
} , )
__UpperCamelCase = field(
default=142 , metadata={
'help': (
'The maximum total sequence length for test target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__UpperCamelCase = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} )
__UpperCamelCase = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} )
__UpperCamelCase = field(default=-1 , metadata={'help': '# test examples. -1 means use all.'} )
__UpperCamelCase = field(default=__lowercase , metadata={'help': 'Source language id for translation.'} )
__UpperCamelCase = field(default=__lowercase , metadata={'help': 'Target language id for translation.'} )
__UpperCamelCase = field(default=__lowercase , metadata={'help': '# num_beams to use for evaluation.'} )
__UpperCamelCase = field(
default=__lowercase , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , )
def __UpperCAmelCase ( snake_case_ : Optional[int] , snake_case_ : Any , snake_case_ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
logger.info(F"""***** {split} metrics *****""" )
for key in sorted(metrics.keys() ):
logger.info(F""" {key} = {metrics[key]}""" )
save_json(snake_case_ , os.path.join(snake_case_ , F"""{split}_results.json""" ) )
def __UpperCAmelCase ( ) -> Union[str, Any]:
"""simple docstring"""
_lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = parser.parse_args_into_dataclasses()
check_output_dir(snake_case_ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("""Training/evaluation parameters %s""" , snake_case_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowerCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_lowerCAmelCase = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""")
for p in extra_model_params:
if getattr(snake_case_ , snake_case_ , snake_case_ ):
assert hasattr(snake_case_ , snake_case_ ), F"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
setattr(snake_case_ , snake_case_ , getattr(snake_case_ , snake_case_ ) )
_lowerCAmelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    _lowerCAmelCase = AutoModelForSeq2SeqLM.from_pretrained(
model_args.model_name_or_path , from_tf=""".ckpt""" in model_args.model_name_or_path , config=snake_case_ , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(snake_case_ , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
_lowerCAmelCase = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(snake_case_ , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(snake_case_ , snake_case_ ):
_lowerCAmelCase = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
_lowerCAmelCase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(snake_case_ )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
_lowerCAmelCase = SeqaSeqDataset
# Get datasets
_lowerCAmelCase = (
dataset_class(
snake_case_ , type_path="""train""" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_train
else None
)
_lowerCAmelCase = (
dataset_class(
snake_case_ , type_path="""val""" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
_lowerCAmelCase = (
dataset_class(
snake_case_ , type_path="""test""" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
_lowerCAmelCase = (
build_compute_metrics_fn(data_args.task , snake_case_ ) if training_args.predict_with_generate else None
)
_lowerCAmelCase = SeqaSeqTrainer(
model=snake_case_ , args=snake_case_ , data_args=snake_case_ , train_dataset=snake_case_ , eval_dataset=snake_case_ , data_collator=SeqaSeqDataCollator(
snake_case_ , snake_case_ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=snake_case_ , tokenizer=snake_case_ , )
_lowerCAmelCase = {}
# Training
if training_args.do_train:
logger.info("""*** Train ***""" )
_lowerCAmelCase = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
_lowerCAmelCase = train_result.metrics
_lowerCAmelCase = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("""train""" , snake_case_ , training_args.output_dir )
all_metrics.update(snake_case_ )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
_lowerCAmelCase = trainer.evaluate(metric_key_prefix="""val""" )
_lowerCAmelCase = data_args.n_val
_lowerCAmelCase = round(metrics["""val_loss"""] , 4 )
if trainer.is_world_process_zero():
handle_metrics("""val""" , snake_case_ , training_args.output_dir )
all_metrics.update(snake_case_ )
if training_args.do_predict:
logger.info("""*** Predict ***""" )
_lowerCAmelCase = trainer.predict(test_dataset=snake_case_ , metric_key_prefix="""test""" )
_lowerCAmelCase = test_output.metrics
_lowerCAmelCase = data_args.n_test
if trainer.is_world_process_zero():
_lowerCAmelCase = round(metrics["""test_loss"""] , 4 )
handle_metrics("""test""" , snake_case_ , training_args.output_dir )
all_metrics.update(snake_case_ )
if training_args.predict_with_generate:
_lowerCAmelCase = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=snake_case_ , clean_up_tokenization_spaces=snake_case_ )
_lowerCAmelCase = lmap(str.strip , snake_case_ )
write_txt_file(snake_case_ , os.path.join(training_args.output_dir , """test_generations.txt""" ) )
if trainer.is_world_process_zero():
save_json(snake_case_ , os.path.join(training_args.output_dir , """all_results.json""" ) )
return all_metrics
def __UpperCAmelCase ( snake_case_ : Any ) -> Dict:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
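# Flow note: the script parses the (model, data, training) dataclasses either
# from a single JSON file or from CLI flags, then runs the optional train /
# eval / predict phases, writing per-split metrics JSON files plus an
# aggregated all_results.json to the output directory.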
| 317
|
"""simple docstring"""
def __UpperCAmelCase ( snake_case_ : int , snake_case_ : list[int] , snake_case_ : int ) -> int:
"""simple docstring"""
def count_of_possible_combinations(snake_case_ : int ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(snake_case_ )
def __UpperCAmelCase ( snake_case_ : int , snake_case_ : list[int] , snake_case_ : int ) -> int:
"""simple docstring"""
def count_of_possible_combinations_with_dp_array(
snake_case_ : int , snake_case_ : list[int] ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
_lowerCAmelCase = sum(
count_of_possible_combinations_with_dp_array(target - item , snake_case_ )
for item in array )
_lowerCAmelCase = answer
return answer
_lowerCAmelCase = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(snake_case_ , snake_case_ )
def __UpperCAmelCase ( snake_case_ : int , snake_case_ : list[int] , snake_case_ : int ) -> int:
"""simple docstring"""
_lowerCAmelCase = [0] * (target + 1)
_lowerCAmelCase = 1
for i in range(1 , target + 1 ):
for j in range(snake_case_ ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE : Tuple = 3
SCREAMING_SNAKE_CASE : Any = 5
SCREAMING_SNAKE_CASE : Optional[int] = [1, 2, 5]
print(combination_sum_iv(n, array, target))
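# Expected output of the demo above: 9. The bottom-up table for target = 5 and
# array = [1, 2, 5] fills as dp = [1, 1, 2, 3, 5, 9]; ordered sequences count
# separately, e.g. 1+1+1+2 and 2+1+1+1 are distinct.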
| 317
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
SCREAMING_SNAKE_CASE : Optional[Any] = {
'''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = [
'''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongT5EncoderModel''',
'''LongT5ForConditionalGeneration''',
'''LongT5Model''',
'''LongT5PreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : str = [
'''FlaxLongT5ForConditionalGeneration''',
'''FlaxLongT5Model''',
'''FlaxLongT5PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
SCREAMING_SNAKE_CASE : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 317
|
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
SCREAMING_SNAKE_CASE : str = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
SCREAMING_SNAKE_CASE : list[int] = [ord(letter) for letter in string.ascii_lowercase]
SCREAMING_SNAKE_CASE : set[int] = {ord(char) for char in VALID_CHARS}
SCREAMING_SNAKE_CASE : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def __UpperCAmelCase ( snake_case_ : list[int] , snake_case_ : tuple[int, ...] ) -> str | None:
"""simple docstring"""
_lowerCAmelCase = ""
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = 42
for keychar, cipherchar in zip(cycle(snake_case_ ) , snake_case_ ):
_lowerCAmelCase = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(snake_case_ )
return decoded
def __UpperCAmelCase ( snake_case_ : list[int] ) -> list[str]:
"""simple docstring"""
_lowerCAmelCase = []
for key in product(snake_case_ , repeat=3 ):
_lowerCAmelCase = try_key(snake_case_ , snake_case_ )
if encoded is not None:
possibles.append(snake_case_ )
return possibles
def __UpperCAmelCase ( snake_case_ : list[str] , snake_case_ : str ) -> list[str]:
"""simple docstring"""
return [possible for possible in possibles if common_word in possible.lower()]
def __UpperCAmelCase ( snake_case_ : str = "p059_cipher.txt" ) -> int:
"""simple docstring"""
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = Path(snake_case_ ).parent.joinpath(snake_case_ ).read_text(encoding="""utf-8""" )
_lowerCAmelCase = [int(snake_case_ ) for number in data.strip().split(""",""" )]
_lowerCAmelCase = filter_valid_chars(snake_case_ )
for common_word in COMMON_WORDS:
_lowerCAmelCase = filter_common_word(snake_case_ , snake_case_ )
if len(snake_case_ ) == 1:
break
_lowerCAmelCase = possibles[0]
return sum(ord(snake_case_ ) for char in decoded_text )
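# Flow of the solver above (descriptive note): every three-letter lowercase
# key is XORed cyclically against the ciphertext; candidates whose bytes all
# decode to printable characters survive, the common English words then narrow
# them down to a single plaintext, and the answer is the sum of its character
# codes.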
if __name__ == "__main__":
print(F'{solution() = }')
| 317
| 1
|
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __lowerCamelCase :
@staticmethod
def A__ (*lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
__UpperCamelCase = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = pipeline(
"""zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" )
_lowerCAmelCase = [
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
}
]
return object_detector, examples
def A__ (self , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = object_detector(examples[0] , threshold=0.0 )
_lowerCAmelCase = len(lowerCamelCase )
self.assertGreater(lowerCamelCase , 0 )
self.assertEqual(
lowerCamelCase , [
{
"""score""": ANY(lowerCamelCase ),
"""label""": ANY(lowerCamelCase ),
"""box""": {"""xmin""": ANY(lowerCamelCase ), """ymin""": ANY(lowerCamelCase ), """xmax""": ANY(lowerCamelCase ), """ymax""": ANY(lowerCamelCase )},
}
for i in range(lowerCamelCase )
] , )
@require_tf
@unittest.skip("""Zero Shot Object Detection not implemented in TF""" )
def A__ (self ):
'''simple docstring'''
pass
@require_torch
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = pipeline(
"""zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" )
_lowerCAmelCase = object_detector(
"""./tests/fixtures/tests_samples/COCO/000000039769.png""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=0.64 , )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
{"""score""": 0.7235, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7218, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7184, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.6748, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6656, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6614, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6456, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
{"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}},
{"""score""": 0.6419, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
] , )
_lowerCAmelCase = object_detector(
[
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
[
{"""score""": 0.7235, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7218, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7184, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.6748, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6656, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6614, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6456, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
{"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}},
{"""score""": 0.6419, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
]
] , )
@require_torch
@slow
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = pipeline("""zero-shot-object-detection""" )
_lowerCAmelCase = object_detector(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
] , )
_lowerCAmelCase = object_detector(
[
{
"""image""": """http://images.cocodataset.org/val2017/000000039769.jpg""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
},
{
"""image""": """http://images.cocodataset.org/val2017/000000039769.jpg""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
},
] , )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
[
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
],
[
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
],
] , )
@require_tf
@unittest.skip("""Zero Shot Object Detection not implemented in TF""" )
def A__ (self ):
'''simple docstring'''
pass
@require_torch
@slow
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = 0.2
_lowerCAmelCase = pipeline("""zero-shot-object-detection""" )
_lowerCAmelCase = object_detector(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=lowerCamelCase , )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
] , )
@require_torch
@slow
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = 2
_lowerCAmelCase = pipeline("""zero-shot-object-detection""" )
_lowerCAmelCase = object_detector(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , top_k=lowerCamelCase , )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
] , )
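# Minimal usage sketch of the pipeline exercised above (image URL and labels
# taken from the slow tests; scores/boxes depend on the default checkpoint):
# from transformers import pipeline
# detector = pipeline("zero-shot-object-detection")
# detector(
#     "http://images.cocodataset.org/val2017/000000039769.jpg",
#     candidate_labels=["cat", "remote", "couch"],
# )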
| 317
|
"""simple docstring"""
def __UpperCAmelCase ( snake_case_ : int = 1000000 ) -> int:
"""simple docstring"""
_lowerCAmelCase = limit + 1
_lowerCAmelCase = [0] * limit
for first_term in range(1 , snake_case_ ):
for n in range(snake_case_ , snake_case_ , snake_case_ ):
_lowerCAmelCase = first_term + n / first_term
if common_difference % 4: # (a + n / a) must be divisible by 4 for d to be an integer
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
frequency[n] += 1 # so z > 0 requires a > d, and n > 0 requires a < 4 * d
_lowerCAmelCase = sum(1 for x in frequency[1:limit] if x == 10 )
return count
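# Derivation behind the loop above (a sketch, with a = first_term and d = the
# final common_difference): for the arithmetic progression x = a + d, y = a,
# z = a - d,
#     x**2 - y**2 - z**2 = (a + d)**2 - a**2 - (a - d)**2 = 4*a*d - a**2 = a * (4*d - a) = n,
# so a must divide n; writing n = a * m gives a + m = 4 * d, which is why the
# code requires (first_term + n / first_term) to be divisible by 4, and the
# bounds d < a < 4 * d keep both z and n positive.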
if __name__ == "__main__":
print(F'{solution() = }')
| 317
| 1
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
class __lowerCamelCase ( __lowercase ):
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
warnings.warn(
"""The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use VideoMAEImageProcessor instead.""" , lowerCamelCase , )
super().__init__(*lowerCamelCase , **lowerCamelCase )
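# Migration sketch for the deprecation above (standard transformers usage; the
# checkpoint name is only an example):
# from transformers import VideoMAEImageProcessor
# processor = VideoMAEImageProcessor.from_pretrained("MCG-NJU/videomae-base")
# inputs = processor(video_frames, return_tensors="pt")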
| 317
|
"""simple docstring"""
from functools import reduce
SCREAMING_SNAKE_CASE : int = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def __UpperCAmelCase ( snake_case_ : str = N ) -> int:
"""simple docstring"""
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda snake_case_ , snake_case_ : str(int(snake_case_ ) * int(snake_case_ ) ) , n[i : i + 13] ) )
for i in range(len(snake_case_ ) - 12 ) )
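# Equivalent explicit-loop sketch of the reduce-based one-liner above (same
# 13-digit window; N is the digit string defined at module level):
# best = 0
# for i in range(len(N) - 12):
#     product = 1
#     for digit in N[i : i + 13]:
#         product *= int(digit)
#     best = max(best, product)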
if __name__ == "__main__":
print(F'{solution() = }')
| 317
| 1
|
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class __lowerCamelCase :
def __init__(self , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="resnet50" , lowerCamelCase=3 , lowerCamelCase=32 , lowerCamelCase=3 , lowerCamelCase=True , lowerCamelCase=True , ):
'''simple docstring'''
_lowerCAmelCase = parent
_lowerCAmelCase = out_indices if out_indices is not None else [4]
_lowerCAmelCase = stage_names
_lowerCAmelCase = out_features
_lowerCAmelCase = backbone
_lowerCAmelCase = batch_size
_lowerCAmelCase = image_size
_lowerCAmelCase = num_channels
_lowerCAmelCase = use_pretrained_backbone
_lowerCAmelCase = is_training
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase = self.get_config()
return config, pixel_values
def A__ (self ):
'''simple docstring'''
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def A__ (self , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = TimmBackbone(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
_lowerCAmelCase = model(lowerCamelCase )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase = config_and_inputs
_lowerCAmelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class __lowerCamelCase ( __lowercase , __lowercase , __lowercase , unittest.TestCase ):
__UpperCamelCase = (TimmBackbone,) if is_torch_available() else ()
__UpperCamelCase = {'feature-extraction': TimmBackbone} if is_torch_available() else {}
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = TimmBackboneModelTester(self )
_lowerCAmelCase = ConfigTester(self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase )
def A__ (self ):
'''simple docstring'''
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = """resnet18"""
_lowerCAmelCase = """microsoft/resnet-18"""
_lowerCAmelCase = AutoBackbone.from_pretrained(lowerCamelCase , use_timm_backbone=lowerCamelCase )
_lowerCAmelCase = AutoBackbone.from_pretrained(lowerCamelCase )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
_lowerCAmelCase = AutoBackbone.from_pretrained(lowerCamelCase , use_timm_backbone=lowerCamelCase , out_indices=[1, 2, 3] )
_lowerCAmelCase = AutoBackbone.from_pretrained(lowerCamelCase , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip("""TimmBackbone doesn't support feed forward chunking""" )
def A__ (self ):
'''simple docstring'''
pass
@unittest.skip("""TimmBackbone doesn't have num_hidden_layers attribute""" )
def A__ (self ):
'''simple docstring'''
pass
@unittest.skip("""TimmBackbone initialization is managed on the timm side""" )
def A__ (self ):
'''simple docstring'''
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def A__ (self ):
'''simple docstring'''
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def A__ (self ):
'''simple docstring'''
pass
@unittest.skip("""TimmBackbone model cannot be created without specifying a backbone checkpoint""" )
def A__ (self ):
'''simple docstring'''
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def A__ (self ):
'''simple docstring'''
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def A__ (self ):
'''simple docstring'''
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def A__ (self ):
'''simple docstring'''
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def A__ (self ):
'''simple docstring'''
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def A__ (self ):
'''simple docstring'''
pass
@unittest.skip("""TimmBackbone doesn't have hidden size info in its configuration.""" )
def A__ (self ):
'''simple docstring'''
pass
@unittest.skip("""TimmBackbone doesn't support output_attentions.""" )
def A__ (self ):
'''simple docstring'''
pass
@unittest.skip("""Safetensors is not supported by timm.""" )
def A__ (self ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def A__ (self ):
'''simple docstring'''
pass
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(lowerCamelCase )
_lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase = [*signature.parameters.keys()]
_lowerCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCamelCase )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase = True
_lowerCAmelCase = self.has_attentions
# no need to test all models as different heads yield the same functionality
_lowerCAmelCase = self.all_model_classes[0]
_lowerCAmelCase = model_class(lowerCamelCase )
model.to(lowerCamelCase )
_lowerCAmelCase = self._prepare_for_class(lowerCamelCase , lowerCamelCase )
_lowerCAmelCase = model(**lowerCamelCase )
_lowerCAmelCase = outputs[0][-1]
# Encoder-/Decoder-only models
_lowerCAmelCase = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
_lowerCAmelCase = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=lowerCamelCase )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
_lowerCAmelCase = model(**lowerCamelCase )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
_lowerCAmelCase = copy.deepcopy(lowerCamelCase )
_lowerCAmelCase = None
_lowerCAmelCase = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
_lowerCAmelCase = model(**lowerCamelCase )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
_lowerCAmelCase = copy.deepcopy(lowerCamelCase )
_lowerCAmelCase = False
_lowerCAmelCase = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
_lowerCAmelCase = model(**lowerCamelCase )
| 317
|
"""simple docstring"""
def __UpperCAmelCase ( snake_case_ : int = 600851475143 ) -> int:
"""simple docstring"""
try:
_lowerCAmelCase = int(snake_case_ )
except (TypeError, ValueError):
raise TypeError("""Parameter n must be int or castable to int.""" )
if n <= 0:
raise ValueError("""Parameter n must be greater than or equal to one.""" )
_lowerCAmelCase = 1
_lowerCAmelCase = 2
while i * i <= n:
while n % i == 0:
_lowerCAmelCase = i
n //= i
i += 1
if n > 1:
_lowerCAmelCase = n
return int(snake_case_ )
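# Worked example of the trial division above: 600851475143 = 71 * 839 * 1471 * 6857,
# so the loop divides out 71, 839 and 1471 in turn and returns 6857, the
# largest prime factor.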
if __name__ == "__main__":
print(F'{solution() = }')
| 317
| 1
|
"""simple docstring"""
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
SCREAMING_SNAKE_CASE : Any = 0
SCREAMING_SNAKE_CASE : Union[str, Any] = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
SCREAMING_SNAKE_CASE : List[str] = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
SCREAMING_SNAKE_CASE : List[Any] = tuple[int, int]
class __lowerCamelCase :
def __init__(self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
'''simple docstring'''
_lowerCAmelCase = pos_x
_lowerCAmelCase = pos_y
_lowerCAmelCase = (pos_y, pos_x)
_lowerCAmelCase = goal_x
_lowerCAmelCase = goal_y
_lowerCAmelCase = g_cost
_lowerCAmelCase = parent
_lowerCAmelCase = self.calculate_heuristic()
_lowerCAmelCase = self.g_cost + self.h_cost
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.pos_x - self.goal_x
_lowerCAmelCase = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(lowerCamelCase ) + abs(lowerCamelCase )
else:
return sqrt(dy**2 + dx**2 )
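# Example of the two heuristics for dx = 3, dy = 4: Manhattan gives
# |3| + |4| = 7, Euclidean gives sqrt(3**2 + 4**2) = 5.0; the module-level
# HEURISTIC flag selects between them.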
def __lt__(self , lowerCamelCase ):
'''simple docstring'''
return self.f_cost < other.f_cost
class __lowerCamelCase :
def __init__(self , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , lowerCamelCase )
_lowerCAmelCase = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99_999 , lowerCamelCase )
_lowerCAmelCase = [self.start]
_lowerCAmelCase = []
_lowerCAmelCase = False
def A__ (self ):
'''simple docstring'''
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
_lowerCAmelCase = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(lowerCamelCase )
self.closed_nodes.append(lowerCamelCase )
_lowerCAmelCase = self.get_successors(lowerCamelCase )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(lowerCamelCase )
else:
# retrieve the best current path
_lowerCAmelCase = self.open_nodes.pop(self.open_nodes.index(lowerCamelCase ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(lowerCamelCase )
else:
self.open_nodes.append(lowerCamelCase )
return [self.start.pos]
def A__ (self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = []
for action in delta:
_lowerCAmelCase = parent.pos_x + action[1]
_lowerCAmelCase = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowerCamelCase ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
lowerCamelCase , lowerCamelCase , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , lowerCamelCase , ) )
return successors
def A__ (self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = node
_lowerCAmelCase = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
_lowerCAmelCase = current_node.parent
path.reverse()
return path
class __lowerCamelCase :
def __init__(self , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = AStar(lowerCamelCase , lowerCamelCase )
_lowerCAmelCase = AStar(lowerCamelCase , lowerCamelCase )
_lowerCAmelCase = False
def A__ (self ):
'''simple docstring'''
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
_lowerCAmelCase = self.fwd_astar.open_nodes.pop(0 )
_lowerCAmelCase = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
lowerCamelCase , lowerCamelCase )
self.fwd_astar.closed_nodes.append(lowerCamelCase )
self.bwd_astar.closed_nodes.append(lowerCamelCase )
_lowerCAmelCase = current_bwd_node
_lowerCAmelCase = current_fwd_node
_lowerCAmelCase = {
self.fwd_astar: self.fwd_astar.get_successors(lowerCamelCase ),
self.bwd_astar: self.bwd_astar.get_successors(lowerCamelCase ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(lowerCamelCase )
else:
# retrieve the best current path
_lowerCAmelCase = astar.open_nodes.pop(
astar.open_nodes.index(lowerCamelCase ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(lowerCamelCase )
else:
astar.open_nodes.append(lowerCamelCase )
return [self.fwd_astar.start.pos]
def A__ (self , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.fwd_astar.retrace_path(lowerCamelCase )
_lowerCAmelCase = self.bwd_astar.retrace_path(lowerCamelCase )
bwd_path.pop()
bwd_path.reverse()
_lowerCAmelCase = fwd_path + bwd_path
return path
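# How the two halves are joined above: the forward search yields
# start -> meeting node and the backward search yields goal -> meeting node;
# popping the duplicated meeting node and reversing the backward half produces
# a single start -> goal path.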
if __name__ == "__main__":
# all coordinates are given in format [y,x]
SCREAMING_SNAKE_CASE : List[str] = (0, 0)
SCREAMING_SNAKE_CASE : List[Any] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
SCREAMING_SNAKE_CASE : Union[str, Any] = time.time()
SCREAMING_SNAKE_CASE : Any = AStar(init, goal)
SCREAMING_SNAKE_CASE : str = a_star.search()
SCREAMING_SNAKE_CASE : Union[str, Any] = time.time() - start_time
print(F'AStar execution time = {end_time:f} seconds')
SCREAMING_SNAKE_CASE : List[str] = time.time()
SCREAMING_SNAKE_CASE : List[str] = BidirectionalAStar(init, goal)
SCREAMING_SNAKE_CASE : List[Any] = bd_astar.search()
SCREAMING_SNAKE_CASE : Any = time.time() - bd_start_time
print(F'BidirectionalAStar execution time = {bd_end_time:f} seconds')
| 317
|
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
SCREAMING_SNAKE_CASE : Optional[Any] = logging.getLogger(__name__)
@dataclass
class __lowerCamelCase :
__UpperCamelCase = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
__UpperCamelCase = field(
default=__lowercase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__UpperCamelCase = field(
default=__lowercase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
__UpperCamelCase = field(
default=__lowercase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
__UpperCamelCase = field(default=__lowercase , metadata={'help': 'Whether to freeze the encoder.'} )
__UpperCamelCase = field(default=__lowercase , metadata={'help': 'Whether to freeze the embeddings.'} )
@dataclass
class __lowerCamelCase :
__UpperCamelCase = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
__UpperCamelCase = field(
default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , )
__UpperCamelCase = field(
default=1_024 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__UpperCamelCase = field(
default=128 , metadata={
'help': (
'The maximum total sequence length for target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__UpperCamelCase = field(
default=142 , metadata={
'help': (
'The maximum total sequence length for validation target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded. '
'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
'during ``evaluate`` and ``predict``.'
)
} , )
__UpperCamelCase = field(
default=142 , metadata={
'help': (
'The maximum total sequence length for test target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__UpperCamelCase = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} )
__UpperCamelCase = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} )
__UpperCamelCase = field(default=-1 , metadata={'help': '# test examples. -1 means use all.'} )
__UpperCamelCase = field(default=__lowercase , metadata={'help': 'Source language id for translation.'} )
__UpperCamelCase = field(default=__lowercase , metadata={'help': 'Target language id for translation.'} )
__UpperCamelCase = field(default=__lowercase , metadata={'help': '# num_beams to use for evaluation.'} )
__UpperCamelCase = field(
default=__lowercase , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , )
def __UpperCAmelCase ( snake_case_ : Optional[int] , snake_case_ : Any , snake_case_ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
logger.info(F"""***** {split} metrics *****""" )
for key in sorted(metrics.keys() ):
logger.info(F""" {key} = {metrics[key]}""" )
save_json(snake_case_ , os.path.join(snake_case_ , F"""{split}_results.json""" ) )
def __UpperCAmelCase ( ) -> Union[str, Any]:
"""simple docstring"""
_lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = parser.parse_args_into_dataclasses()
check_output_dir(snake_case_ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("""Training/evaluation parameters %s""" , snake_case_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowerCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_lowerCAmelCase = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""")
for p in extra_model_params:
if getattr(snake_case_ , snake_case_ , snake_case_ ):
assert hasattr(snake_case_ , snake_case_ ), F"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
setattr(snake_case_ , snake_case_ , getattr(snake_case_ , snake_case_ ) )
_lowerCAmelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf=""".ckpt""" in model_args.model_name_or_path , config=snake_case_ , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(snake_case_ , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
_lowerCAmelCase = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(snake_case_ , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(snake_case_ , snake_case_ ):
_lowerCAmelCase = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
_lowerCAmelCase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(snake_case_ )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
_lowerCAmelCase = SeqaSeqDataset
# Get datasets
_lowerCAmelCase = (
dataset_class(
snake_case_ , type_path="""train""" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_train
else None
)
_lowerCAmelCase = (
dataset_class(
snake_case_ , type_path="""val""" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
_lowerCAmelCase = (
dataset_class(
snake_case_ , type_path="""test""" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
_lowerCAmelCase = (
build_compute_metrics_fn(data_args.task , snake_case_ ) if training_args.predict_with_generate else None
)
_lowerCAmelCase = SeqaSeqTrainer(
model=snake_case_ , args=snake_case_ , data_args=snake_case_ , train_dataset=snake_case_ , eval_dataset=snake_case_ , data_collator=SeqaSeqDataCollator(
snake_case_ , snake_case_ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=snake_case_ , tokenizer=snake_case_ , )
_lowerCAmelCase = {}
# Training
if training_args.do_train:
logger.info("""*** Train ***""" )
_lowerCAmelCase = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
_lowerCAmelCase = train_result.metrics
_lowerCAmelCase = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("""train""" , snake_case_ , training_args.output_dir )
all_metrics.update(snake_case_ )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
_lowerCAmelCase = trainer.evaluate(metric_key_prefix="""val""" )
_lowerCAmelCase = data_args.n_val
_lowerCAmelCase = round(metrics["""val_loss"""] , 4 )
if trainer.is_world_process_zero():
handle_metrics("""val""" , snake_case_ , training_args.output_dir )
all_metrics.update(snake_case_ )
if training_args.do_predict:
logger.info("""*** Predict ***""" )
_lowerCAmelCase = trainer.predict(test_dataset=snake_case_ , metric_key_prefix="""test""" )
_lowerCAmelCase = test_output.metrics
_lowerCAmelCase = data_args.n_test
if trainer.is_world_process_zero():
_lowerCAmelCase = round(metrics["""test_loss"""] , 4 )
handle_metrics("""test""" , snake_case_ , training_args.output_dir )
all_metrics.update(snake_case_ )
if training_args.predict_with_generate:
_lowerCAmelCase = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=snake_case_ , clean_up_tokenization_spaces=snake_case_ )
_lowerCAmelCase = lmap(str.strip , snake_case_ )
write_txt_file(snake_case_ , os.path.join(training_args.output_dir , """test_generations.txt""" ) )
if trainer.is_world_process_zero():
save_json(snake_case_ , os.path.join(training_args.output_dir , """all_results.json""" ) )
return all_metrics
def __UpperCAmelCase ( snake_case_ : Any ) -> Dict:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
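# Example invocation sketch (flag names taken from the dataclasses above plus
# the standard training arguments; paths and model are placeholders):
# python finetune_trainer.py \
#     --model_name_or_path t5-small \
#     --data_dir ./data --output_dir ./output \
#     --task summarization --do_train --do_eval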
| 317
| 1
|
"""simple docstring"""
def __UpperCAmelCase ( snake_case_ : str , snake_case_ : str ) -> str:
"""simple docstring"""
if not (isinstance(snake_case_ , snake_case_ ) and isinstance(snake_case_ , snake_case_ )):
raise ValueError("""longest_common_substring() takes two strings for inputs""" )
_lowerCAmelCase = len(snake_case_ )
_lowerCAmelCase = len(snake_case_ )
_lowerCAmelCase = [[0] * (texta_length + 1) for _ in range(texta_length + 1 )]
_lowerCAmelCase = 0
_lowerCAmelCase = 0
for i in range(1 , texta_length + 1 ):
for j in range(1 , texta_length + 1 ):
if texta[i - 1] == texta[j - 1]:
_lowerCAmelCase = 1 + dp[i - 1][j - 1]
if dp[i][j] > ans_length:
_lowerCAmelCase = i
_lowerCAmelCase = dp[i][j]
return texta[ans_index - ans_length : ans_index]
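# Worked example of the DP above: for "abcdxyz" and "xyzabcd" the table peaks
# at length 4 where the substrings ending in 'd' align, so the function
# returns "abcd".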
if __name__ == "__main__":
import doctest
doctest.testmod()
| 317
|
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE : List[Any] = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 317
| 1
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : str = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all MVP models at https://huggingface.co/models?filter=mvp
SCREAMING_SNAKE_CASE : Optional[Any] = {
'''vocab_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''',
},
'''added_tokens.json''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''',
},
'''merges_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''',
},
}
SCREAMING_SNAKE_CASE : Optional[int] = {
'''RUCAIBox/mvp''': 1_0_2_4,
}
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ['input_ids', 'attention_mask']
__UpperCamelCase = MvpTokenizer
def __init__(self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="replace" , lowerCamelCase="<s>" , lowerCamelCase="</s>" , lowerCamelCase="</s>" , lowerCamelCase="<s>" , lowerCamelCase="<unk>" , lowerCamelCase="<pad>" , lowerCamelCase="<mask>" , lowerCamelCase=False , lowerCamelCase=True , **lowerCamelCase , ):
'''simple docstring'''
super().__init__(
lowerCamelCase , lowerCamelCase , tokenizer_file=lowerCamelCase , errors=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , sep_token=lowerCamelCase , cls_token=lowerCamelCase , unk_token=lowerCamelCase , pad_token=lowerCamelCase , mask_token=lowerCamelCase , add_prefix_space=lowerCamelCase , trim_offsets=lowerCamelCase , **lowerCamelCase , )
_lowerCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , lowerCamelCase ) != add_prefix_space:
_lowerCAmelCase = getattr(lowerCamelCase , pre_tok_state.pop("""type""" ) )
_lowerCAmelCase = add_prefix_space
_lowerCAmelCase = pre_tok_class(**lowerCamelCase )
_lowerCAmelCase = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_lowerCAmelCase = """post_processor"""
_lowerCAmelCase = getattr(self.backend_tokenizer , lowerCamelCase , lowerCamelCase )
if tokenizer_component_instance:
_lowerCAmelCase = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
_lowerCAmelCase = tuple(state["""sep"""] )
if "cls" in state:
_lowerCAmelCase = tuple(state["""cls"""] )
_lowerCAmelCase = False
if state.get("""add_prefix_space""" , lowerCamelCase ) != add_prefix_space:
_lowerCAmelCase = add_prefix_space
_lowerCAmelCase = True
if state.get("""trim_offsets""" , lowerCamelCase ) != trim_offsets:
_lowerCAmelCase = trim_offsets
_lowerCAmelCase = True
if changes_to_apply:
_lowerCAmelCase = getattr(lowerCamelCase , state.pop("""type""" ) )
_lowerCAmelCase = component_class(**lowerCamelCase )
setattr(self.backend_tokenizer , lowerCamelCase , lowerCamelCase )
@property
def A__ (self ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def A__ (self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else value
_lowerCAmelCase = value
def A__ (self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = kwargs.get("""is_split_into_words""" , lowerCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"""to use it with pretokenized inputs.""" )
return super()._batch_encode_plus(*lowerCamelCase , **lowerCamelCase )
def A__ (self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = kwargs.get("""is_split_into_words""" , lowerCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"""to use it with pretokenized inputs.""" )
return super()._encode_plus(*lowerCamelCase , **lowerCamelCase )
def A__ (self , lowerCamelCase , lowerCamelCase = None ):
'''simple docstring'''
_lowerCAmelCase = self._tokenizer.model.save(lowerCamelCase , name=lowerCamelCase )
return tuple(lowerCamelCase )
def A__ (self , lowerCamelCase , lowerCamelCase=None ):
'''simple docstring'''
_lowerCAmelCase = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def A__ (self , lowerCamelCase , lowerCamelCase = None ):
'''simple docstring'''
_lowerCAmelCase = [self.sep_token_id]
_lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
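# Minimal usage sketch (checkpoint name from the pretrained map above):
# from transformers import MvpTokenizerFast
# tokenizer = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
# tokenizer("Hello world")  # -> input_ids wrapped in <s> ... </s> as built above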
| 317
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __lowerCamelCase ( unittest.TestCase ):
def __init__(self , lowerCamelCase , lowerCamelCase=7 , lowerCamelCase=3 , lowerCamelCase=18 , lowerCamelCase=30 , lowerCamelCase=400 , lowerCamelCase=True , lowerCamelCase=None , lowerCamelCase=True , lowerCamelCase=None , ):
'''simple docstring'''
_lowerCAmelCase = size if size is not None else {"""shortest_edge""": 20}
_lowerCAmelCase = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = num_channels
_lowerCAmelCase = image_size
_lowerCAmelCase = min_resolution
_lowerCAmelCase = max_resolution
_lowerCAmelCase = do_resize
_lowerCAmelCase = size
_lowerCAmelCase = do_center_crop
_lowerCAmelCase = crop_size
def A__ (self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __lowerCamelCase ( __lowercase , unittest.TestCase ):
__UpperCamelCase = MobileNetVaImageProcessor if is_vision_available() else None
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = MobileNetVaImageProcessingTester(self )
@property
def A__ (self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase , """size""" ) )
self.assertTrue(hasattr(lowerCamelCase , """do_center_crop""" ) )
self.assertTrue(hasattr(lowerCamelCase , """crop_size""" ) )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 20} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
_lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def A__ (self ):
'''simple docstring'''
pass
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , Image.Image )
# Test not batched input
_lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_lowerCAmelCase = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , np.ndarray )
# Test not batched input
_lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_lowerCAmelCase = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , torch.Tensor )
# Test not batched input
_lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_lowerCAmelCase = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 317
| 1
|
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
SCREAMING_SNAKE_CASE : Any = datasets.utils.logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : List[str] = ['''names''', '''prefix''']
SCREAMING_SNAKE_CASE : Any = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols''']
SCREAMING_SNAKE_CASE : Tuple = ['''encoding_errors''', '''on_bad_lines''']
SCREAMING_SNAKE_CASE : int = ['''date_format''']
@dataclass
class __lowerCamelCase ( datasets.BuilderConfig ):
__UpperCamelCase = ","
__UpperCamelCase = None
__UpperCamelCase = "infer"
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = True
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = False
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = False
__UpperCamelCase = True
__UpperCamelCase = None
__UpperCamelCase = "."
__UpperCamelCase = None
__UpperCamelCase = '"'
__UpperCamelCase = 0
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = 0
__UpperCamelCase = True
__UpperCamelCase = False
__UpperCamelCase = None
__UpperCamelCase = 10_000
__UpperCamelCase = None
__UpperCamelCase = "strict"
__UpperCamelCase = "error"
__UpperCamelCase = None
def A__ (self ):
'''simple docstring'''
if self.delimiter is not None:
_lowerCAmelCase = self.delimiter
if self.column_names is not None:
_lowerCAmelCase = self.column_names
@property
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = {
"""sep""": self.sep,
"""header""": self.header,
"""names""": self.names,
"""index_col""": self.index_col,
"""usecols""": self.usecols,
"""prefix""": self.prefix,
"""mangle_dupe_cols""": self.mangle_dupe_cols,
"""engine""": self.engine,
"""converters""": self.converters,
"""true_values""": self.true_values,
"""false_values""": self.false_values,
"""skipinitialspace""": self.skipinitialspace,
"""skiprows""": self.skiprows,
"""nrows""": self.nrows,
"""na_values""": self.na_values,
"""keep_default_na""": self.keep_default_na,
"""na_filter""": self.na_filter,
"""verbose""": self.verbose,
"""skip_blank_lines""": self.skip_blank_lines,
"""thousands""": self.thousands,
"""decimal""": self.decimal,
"""lineterminator""": self.lineterminator,
"""quotechar""": self.quotechar,
"""quoting""": self.quoting,
"""escapechar""": self.escapechar,
"""comment""": self.comment,
"""encoding""": self.encoding,
"""dialect""": self.dialect,
"""error_bad_lines""": self.error_bad_lines,
"""warn_bad_lines""": self.warn_bad_lines,
"""skipfooter""": self.skipfooter,
"""doublequote""": self.doublequote,
"""memory_map""": self.memory_map,
"""float_precision""": self.float_precision,
"""chunksize""": self.chunksize,
"""encoding_errors""": self.encoding_errors,
"""on_bad_lines""": self.on_bad_lines,
"""date_format""": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , lowerCamelCase ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class __lowerCamelCase ( datasets.ArrowBasedBuilder ):
__UpperCamelCase = CsvConfig
def A__ (self ):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
if not self.config.data_files:
raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
_lowerCAmelCase = dl_manager.download_and_extract(self.config.data_files )
if isinstance(lowerCamelCase , (str, list, tuple) ):
_lowerCAmelCase = data_files
if isinstance(lowerCamelCase , lowerCamelCase ):
_lowerCAmelCase = [files]
_lowerCAmelCase = [dl_manager.iter_files(lowerCamelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
_lowerCAmelCase = []
for split_name, files in data_files.items():
if isinstance(lowerCamelCase , lowerCamelCase ):
_lowerCAmelCase = [files]
_lowerCAmelCase = [dl_manager.iter_files(lowerCamelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=lowerCamelCase , gen_kwargs={"""files""": files} ) )
return splits
def A__ (self , lowerCamelCase ):
'''simple docstring'''
if self.config.features is not None:
_lowerCAmelCase = self.config.features.arrow_schema
if all(not require_storage_cast(lowerCamelCase ) for feature in self.config.features.values() ):
# cheaper cast
_lowerCAmelCase = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=lowerCamelCase )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
_lowerCAmelCase = table_cast(lowerCamelCase , lowerCamelCase )
return pa_table
def A__ (self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
_lowerCAmelCase = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(lowerCamelCase ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(lowerCamelCase ) ):
_lowerCAmelCase = pd.read_csv(lowerCamelCase , iterator=lowerCamelCase , dtype=lowerCamelCase , **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(lowerCamelCase ):
_lowerCAmelCase = pa.Table.from_pandas(lowerCamelCase )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(lowerCamelCase )
except ValueError as e:
logger.error(f"""Failed to read file '{file}' with error {type(lowerCamelCase )}: {e}""" )
raise
| 317
|
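The builder above assembles a `pd.read_csv` kwargs dict and then deletes every entry still equal to its default, so only explicit overrides reach pandas. A minimal sketch of that pattern, using a hypothetical `ReaderConfig` rather than the real `CsvConfig`:

class ReaderConfig:
    def __init__(self, sep=",", header=0, nrows=None):
        self.sep = sep
        self.header = header
        self.nrows = nrows

    def to_kwargs(self):
        kwargs = {"sep": self.sep, "header": self.header, "nrows": self.nrows}
        defaults = ReaderConfig()  # a fresh instance carries the default values
        for name in list(kwargs):  # drop entries left at their default
            if kwargs[name] == getattr(defaults, name):
                del kwargs[name]
        return kwargs

print(ReaderConfig(sep=";").to_kwargs())  # {'sep': ';'}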
"""simple docstring"""
def __UpperCAmelCase ( snake_case_ : list ) -> list:
"""simple docstring"""
for i in range(len(snake_case_ ) - 1 , 0 , -1 ):
_lowerCAmelCase = False
for j in range(snake_case_ , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
_lowerCAmelCase , _lowerCAmelCase = unsorted[j - 1], unsorted[j]
_lowerCAmelCase = True
for j in range(snake_case_ ):
if unsorted[j] > unsorted[j + 1]:
_lowerCAmelCase , _lowerCAmelCase = unsorted[j + 1], unsorted[j]
_lowerCAmelCase = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE : List[Any] = input('''Enter numbers separated by a comma:\n''').strip()
SCREAMING_SNAKE_CASE : List[str] = [int(item) for item in user_input.split(''',''')]
print(F'{cocktail_shaker_sort(unsorted) = }')
| 317
| 1
|
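The sample above is a cocktail shaker sort, but the style transform renamed its tuple-swap targets to `_lowerCAmelCase`, so the swaps no longer do anything. A readable working reference with the same control flow; the variable names are reconstructions, not the originals:

def cocktail_shaker_sort(items: list) -> list:
    """Sort in place: sweep right-to-left, then left-to-right, until no swap occurs."""
    for end in range(len(items) - 1, 0, -1):
        swapped = False
        # Backward pass: bubble the smallest remaining value toward the front.
        for j in range(end, 0, -1):
            if items[j] < items[j - 1]:
                items[j], items[j - 1] = items[j - 1], items[j]
                swapped = True
        # Forward pass: bubble the largest remaining value toward the back.
        for j in range(end):
            if items[j] > items[j + 1]:
                items[j], items[j + 1] = items[j + 1], items[j]
                swapped = True
        if not swapped:
            break
    return items

assert cocktail_shaker_sort([4, 5, 2, 1, 2]) == [1, 2, 2, 4, 5]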
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Any = {
'''transfo-xl-wt103''': '''https://huggingface.co/transfo-xl-wt103/resolve/main/config.json''',
}
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = 'transfo-xl'
__UpperCamelCase = ['mems']
__UpperCamelCase = {
'n_token': 'vocab_size',
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__(self , lowerCamelCase=267_735 , lowerCamelCase=[20_000, 40_000, 200_000] , lowerCamelCase=1_024 , lowerCamelCase=1_024 , lowerCamelCase=16 , lowerCamelCase=64 , lowerCamelCase=4_096 , lowerCamelCase=4 , lowerCamelCase=False , lowerCamelCase=18 , lowerCamelCase=1_600 , lowerCamelCase=1_000 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=0 , lowerCamelCase=-1 , lowerCamelCase=True , lowerCamelCase=0.1 , lowerCamelCase=0.0 , lowerCamelCase=True , lowerCamelCase="normal" , lowerCamelCase=0.01 , lowerCamelCase=0.01 , lowerCamelCase=0.02 , lowerCamelCase=1e-5 , lowerCamelCase=0 , **lowerCamelCase , ):
'''simple docstring'''
_lowerCAmelCase = vocab_size
_lowerCAmelCase = []
self.cutoffs.extend(lowerCamelCase )
if proj_share_all_but_first:
_lowerCAmelCase = [False] + [True] * len(self.cutoffs )
else:
_lowerCAmelCase = [False] + [False] * len(self.cutoffs )
_lowerCAmelCase = d_model
_lowerCAmelCase = d_embed
_lowerCAmelCase = d_head
_lowerCAmelCase = d_inner
_lowerCAmelCase = div_val
_lowerCAmelCase = pre_lnorm
_lowerCAmelCase = n_layer
_lowerCAmelCase = n_head
_lowerCAmelCase = mem_len
_lowerCAmelCase = same_length
_lowerCAmelCase = attn_type
_lowerCAmelCase = clamp_len
_lowerCAmelCase = sample_softmax
_lowerCAmelCase = adaptive
_lowerCAmelCase = dropout
_lowerCAmelCase = dropatt
_lowerCAmelCase = untie_r
_lowerCAmelCase = init
_lowerCAmelCase = init_range
_lowerCAmelCase = proj_init_std
_lowerCAmelCase = init_std
_lowerCAmelCase = layer_norm_epsilon
super().__init__(eos_token_id=lowerCamelCase , **lowerCamelCase )
@property
def A__ (self ):
'''simple docstring'''
logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
def A__ (self , lowerCamelCase ):
'''simple docstring'''
raise NotImplementedError(
f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 317
|
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
def __UpperCAmelCase ( snake_case_ : bool , snake_case_ : bool ) -> Tuple:
"""simple docstring"""
def run_func(snake_case_ : Union[str, Any] ):
@wraps(snake_case_ )
def run_in_eager_mode(*snake_case_ : Optional[int] , **snake_case_ : Union[str, Any] ):
return func(*snake_case_ , **snake_case_ )
@wraps(snake_case_ )
@tf.function(experimental_compile=snake_case_ )
def run_in_graph_mode(*snake_case_ : Dict , **snake_case_ : Union[str, Any] ):
return func(*snake_case_ , **snake_case_ )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
"""Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
def __UpperCAmelCase ( snake_case_ : int , snake_case_ : int , snake_case_ : int ) -> ["tf.Tensor"]:
"""simple docstring"""
_lowerCAmelCase = random.Random()
_lowerCAmelCase = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(snake_case_ , shape=(batch_size, sequence_length) , dtype=tf.intaa )
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = "TensorFlow"
@property
def A__ (self ):
'''simple docstring'''
return tf.__version__
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_lowerCAmelCase = self._prepare_inference_func(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return self._measure_speed(_inference )
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_lowerCAmelCase = self._prepare_train_func(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return self._measure_speed(_train )
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowerCamelCase )
_lowerCAmelCase = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_lowerCAmelCase = self._prepare_inference_func(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return self._measure_memory(_inference )
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowerCamelCase )
_lowerCAmelCase = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_lowerCAmelCase = self._prepare_train_func(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return self._measure_memory(_train )
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
_lowerCAmelCase = (
hasattr(lowerCamelCase , """architectures""" )
and isinstance(config.architectures , lowerCamelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_lowerCAmelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
_lowerCAmelCase = __import__("""transformers""" , fromlist=[model_class] )
_lowerCAmelCase = getattr(lowerCamelCase , lowerCamelCase )
_lowerCAmelCase = model_cls(lowerCamelCase )
except ImportError:
raise ImportError(
f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
_lowerCAmelCase = TF_MODEL_MAPPING[config.__class__](lowerCamelCase )
# encoder-decoder has vocab size saved differently
_lowerCAmelCase = config.vocab_size if hasattr(lowerCamelCase , """vocab_size""" ) else config.encoder.vocab_size
_lowerCAmelCase = random_input_ids(lowerCamelCase , lowerCamelCase , lowerCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(lowerCamelCase , decoder_input_ids=lowerCamelCase , training=lowerCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(lowerCamelCase , training=lowerCamelCase )
_lowerCAmelCase = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" )
if self.args.fpaa:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
_lowerCAmelCase = (
hasattr(lowerCamelCase , """architectures""" )
and isinstance(config.architectures , lowerCamelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_lowerCAmelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
_lowerCAmelCase = __import__("""transformers""" , fromlist=[model_class] )
_lowerCAmelCase = getattr(lowerCamelCase , lowerCamelCase )
_lowerCAmelCase = model_cls(lowerCamelCase )
except ImportError:
raise ImportError(
f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
_lowerCAmelCase = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](lowerCamelCase )
# encoder-decoder has vocab size saved differently
_lowerCAmelCase = config.vocab_size if hasattr(lowerCamelCase , """vocab_size""" ) else config.encoder.vocab_size
_lowerCAmelCase = random_input_ids(lowerCamelCase , lowerCamelCase , lowerCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
_lowerCAmelCase = model(lowerCamelCase , decoder_input_ids=lowerCamelCase , labels=lowerCamelCase , training=lowerCamelCase )[0]
_lowerCAmelCase = tf.gradients(lowerCamelCase , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
_lowerCAmelCase = model(lowerCamelCase , labels=lowerCamelCase , training=lowerCamelCase )[0]
_lowerCAmelCase = tf.gradients(lowerCamelCase , model.trainable_variables )
return gradients
_lowerCAmelCase = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def A__ (self , lowerCamelCase ):
'''simple docstring'''
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run the model 5 additional times to stabilize compilation for tpu
logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" )
timeit.repeat(lowerCamelCase , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
_lowerCAmelCase = timeit.repeat(
lowerCamelCase , repeat=self.args.repeat , number=10 , )
return min(lowerCamelCase ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(f"""Doesn't fit on GPU. {e}""" )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
logger.info(
"""Note that TensorFlow allocates more memory than """
"""it might need to speed up computation. """
"""The memory reported here corresponds to the memory """
"""reported by `nvidia-smi`, which can vary depending """
"""on total available memory on the GPU that is used.""" )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"""`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"""
""" consumption line by line.""" )
_lowerCAmelCase = start_memory_tracing("""transformers""" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"""Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"""
""" with `args.memory=False`""" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"""py3nvml not installed, we won't log GPU memory usage. """
"""Install py3nvml (pip install py3nvml) to log information about GPU.""" )
_lowerCAmelCase = """N/A"""
else:
logger.info(
"""Measuring total GPU usage on GPU device. Make sure to not have additional processes"""
""" running on the same GPU.""" )
# init nvml
nvml.nvmlInit()
func()
_lowerCAmelCase = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
_lowerCAmelCase = nvml.nvmlDeviceGetMemoryInfo(lowerCamelCase )
_lowerCAmelCase = meminfo.used
_lowerCAmelCase = Memory(lowerCamelCase )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"""When enabling line by line tracing, the max peak memory for CPU is inaccurate in"""
""" TensorFlow.""" )
_lowerCAmelCase = None
else:
_lowerCAmelCase = measure_peak_memory_cpu(lowerCamelCase )
_lowerCAmelCase = Memory(lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else memory_bytes
if self.args.trace_memory_line_by_line:
_lowerCAmelCase = stop_memory_tracing(lowerCamelCase )
if memory is None:
_lowerCAmelCase = summary.total
else:
_lowerCAmelCase = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(f"""Doesn't fit on GPU. {e}""" )
return "N/A", None
| 317
| 1
|
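The benchmark above returns `min(timeit.repeat(...)) / 10.0` rather than an average, following the timeit documentation it cites: the minimum over repeats is the least noisy latency estimate. The measurement pattern in a framework-free sketch (the repeat and number values here are arbitrary):

import timeit

def measure_speed(func, repeat: int = 3, number: int = 10) -> float:
    """Best per-call time in seconds over `repeat` trials of `number` calls each."""
    func()  # one warm-up call, mirroring the stabilization runs in the sample
    runtimes = timeit.repeat(func, repeat=repeat, number=number)
    return min(runtimes) / number

print(measure_speed(lambda: sum(range(1_000))))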
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : str = OrderedDict(
[
('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''),
('''beit''', '''BeitFeatureExtractor'''),
('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''),
('''clap''', '''ClapFeatureExtractor'''),
('''clip''', '''CLIPFeatureExtractor'''),
('''clipseg''', '''ViTFeatureExtractor'''),
('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''),
('''convnext''', '''ConvNextFeatureExtractor'''),
('''cvt''', '''ConvNextFeatureExtractor'''),
('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''),
('''data2vec-vision''', '''BeitFeatureExtractor'''),
('''deformable_detr''', '''DeformableDetrFeatureExtractor'''),
('''deit''', '''DeiTFeatureExtractor'''),
('''detr''', '''DetrFeatureExtractor'''),
('''dinat''', '''ViTFeatureExtractor'''),
('''donut-swin''', '''DonutFeatureExtractor'''),
('''dpt''', '''DPTFeatureExtractor'''),
('''encodec''', '''EncodecFeatureExtractor'''),
('''flava''', '''FlavaFeatureExtractor'''),
('''glpn''', '''GLPNFeatureExtractor'''),
('''groupvit''', '''CLIPFeatureExtractor'''),
('''hubert''', '''Wav2Vec2FeatureExtractor'''),
('''imagegpt''', '''ImageGPTFeatureExtractor'''),
('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''),
('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''),
('''levit''', '''LevitFeatureExtractor'''),
('''maskformer''', '''MaskFormerFeatureExtractor'''),
('''mctct''', '''MCTCTFeatureExtractor'''),
('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''),
('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''),
('''mobilevit''', '''MobileViTFeatureExtractor'''),
('''nat''', '''ViTFeatureExtractor'''),
('''owlvit''', '''OwlViTFeatureExtractor'''),
('''perceiver''', '''PerceiverFeatureExtractor'''),
('''poolformer''', '''PoolFormerFeatureExtractor'''),
('''regnet''', '''ConvNextFeatureExtractor'''),
('''resnet''', '''ConvNextFeatureExtractor'''),
('''segformer''', '''SegformerFeatureExtractor'''),
('''sew''', '''Wav2Vec2FeatureExtractor'''),
('''sew-d''', '''Wav2Vec2FeatureExtractor'''),
('''speech_to_text''', '''Speech2TextFeatureExtractor'''),
('''speecht5''', '''SpeechT5FeatureExtractor'''),
('''swiftformer''', '''ViTFeatureExtractor'''),
('''swin''', '''ViTFeatureExtractor'''),
('''swinv2''', '''ViTFeatureExtractor'''),
('''table-transformer''', '''DetrFeatureExtractor'''),
('''timesformer''', '''VideoMAEFeatureExtractor'''),
('''tvlt''', '''TvltFeatureExtractor'''),
('''unispeech''', '''Wav2Vec2FeatureExtractor'''),
('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''),
('''van''', '''ConvNextFeatureExtractor'''),
('''videomae''', '''VideoMAEFeatureExtractor'''),
('''vilt''', '''ViltFeatureExtractor'''),
('''vit''', '''ViTFeatureExtractor'''),
('''vit_mae''', '''ViTFeatureExtractor'''),
('''vit_msn''', '''ViTFeatureExtractor'''),
('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''),
('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''),
('''wavlm''', '''Wav2Vec2FeatureExtractor'''),
('''whisper''', '''WhisperFeatureExtractor'''),
('''xclip''', '''CLIPFeatureExtractor'''),
('''yolos''', '''YolosFeatureExtractor'''),
]
)
SCREAMING_SNAKE_CASE : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def __UpperCAmelCase ( snake_case_ : str ) -> Optional[int]:
"""simple docstring"""
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
_lowerCAmelCase = model_type_to_module_name(snake_case_ )
_lowerCAmelCase = importlib.import_module(F""".{module_name}""" , """transformers.models""" )
try:
return getattr(snake_case_ , snake_case_ )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(snake_case_ , """__name__""" , snake_case_ ) == class_name:
return extractor
# We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
_lowerCAmelCase = importlib.import_module("""transformers""" )
if hasattr(snake_case_ , snake_case_ ):
return getattr(snake_case_ , snake_case_ )
return None
def __UpperCAmelCase ( snake_case_ : Union[str, os.PathLike] , snake_case_ : Optional[Union[str, os.PathLike]] = None , snake_case_ : bool = False , snake_case_ : bool = False , snake_case_ : Optional[Dict[str, str]] = None , snake_case_ : Optional[Union[bool, str]] = None , snake_case_ : Optional[str] = None , snake_case_ : bool = False , **snake_case_ : Union[str, Any] , ) -> Optional[int]:
"""simple docstring"""
_lowerCAmelCase = get_file_from_repo(
snake_case_ , snake_case_ , cache_dir=snake_case_ , force_download=snake_case_ , resume_download=snake_case_ , proxies=snake_case_ , use_auth_token=snake_case_ , revision=snake_case_ , local_files_only=snake_case_ , )
if resolved_config_file is None:
logger.info(
"""Could not locate the feature extractor configuration file, will try to use the model config instead.""" )
return {}
with open(snake_case_ , encoding="""utf-8""" ) as reader:
return json.load(snake_case_ )
class __lowerCamelCase :
def __init__(self ):
'''simple docstring'''
raise EnvironmentError(
"""AutoFeatureExtractor is designed to be instantiated """
"""using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.""" )
@classmethod
@replace_list_option_in_docstrings(lowerCamelCase )
def A__ (cls , lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = kwargs.pop("""config""" , lowerCamelCase )
_lowerCAmelCase = kwargs.pop("""trust_remote_code""" , lowerCamelCase )
_lowerCAmelCase = True
_lowerCAmelCase , _lowerCAmelCase = FeatureExtractionMixin.get_feature_extractor_dict(lowerCamelCase , **lowerCamelCase )
_lowerCAmelCase = config_dict.get("""feature_extractor_type""" , lowerCamelCase )
_lowerCAmelCase = None
if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {} ):
_lowerCAmelCase = config_dict["""auto_map"""]["""AutoFeatureExtractor"""]
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(lowerCamelCase , lowerCamelCase ):
_lowerCAmelCase = AutoConfig.from_pretrained(lowerCamelCase , **lowerCamelCase )
# It could be in `config.feature_extractor_type``
_lowerCAmelCase = getattr(lowerCamelCase , """feature_extractor_type""" , lowerCamelCase )
if hasattr(lowerCamelCase , """auto_map""" ) and "AutoFeatureExtractor" in config.auto_map:
_lowerCAmelCase = config.auto_map["""AutoFeatureExtractor"""]
if feature_extractor_class is not None:
_lowerCAmelCase = feature_extractor_class_from_name(lowerCamelCase )
_lowerCAmelCase = feature_extractor_auto_map is not None
_lowerCAmelCase = feature_extractor_class is not None or type(lowerCamelCase ) in FEATURE_EXTRACTOR_MAPPING
_lowerCAmelCase = resolve_trust_remote_code(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
if has_remote_code and trust_remote_code:
_lowerCAmelCase = get_class_from_dynamic_module(
lowerCamelCase , lowerCamelCase , **lowerCamelCase )
_lowerCAmelCase = kwargs.pop("""code_revision""" , lowerCamelCase )
if os.path.isdir(lowerCamelCase ):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(lowerCamelCase , **lowerCamelCase )
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(lowerCamelCase , **lowerCamelCase )
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(lowerCamelCase ) in FEATURE_EXTRACTOR_MAPPING:
_lowerCAmelCase = FEATURE_EXTRACTOR_MAPPING[type(lowerCamelCase )]
return feature_extractor_class.from_dict(lowerCamelCase , **lowerCamelCase )
raise ValueError(
f"""Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a """
f"""`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following """
f"""`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}""" )
@staticmethod
def A__ (lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
FEATURE_EXTRACTOR_MAPPING.register(lowerCamelCase , lowerCamelCase )
| 317
|
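`feature_extractor_class_from_name` above resolves classes lazily: import the owning module, then fetch the attribute, falling back across candidates. The core import-and-getattr pattern in a self-contained sketch:

import importlib

def class_from_name(module_name: str, class_name: str):
    """Resolve a class lazily from its module path and name."""
    module = importlib.import_module(module_name)
    return getattr(module, class_name)

cls = class_from_name("collections", "OrderedDict")
print(cls().__class__.__name__)  # OrderedDict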
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Any = {
'''transfo-xl-wt103''': '''https://huggingface.co/transfo-xl-wt103/resolve/main/config.json''',
}
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = 'transfo-xl'
__UpperCamelCase = ['mems']
__UpperCamelCase = {
'n_token': 'vocab_size',
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__(self , lowerCamelCase=267_735 , lowerCamelCase=[20_000, 40_000, 200_000] , lowerCamelCase=1_024 , lowerCamelCase=1_024 , lowerCamelCase=16 , lowerCamelCase=64 , lowerCamelCase=4_096 , lowerCamelCase=4 , lowerCamelCase=False , lowerCamelCase=18 , lowerCamelCase=1_600 , lowerCamelCase=1_000 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=0 , lowerCamelCase=-1 , lowerCamelCase=True , lowerCamelCase=0.1 , lowerCamelCase=0.0 , lowerCamelCase=True , lowerCamelCase="normal" , lowerCamelCase=0.01 , lowerCamelCase=0.01 , lowerCamelCase=0.02 , lowerCamelCase=1e-5 , lowerCamelCase=0 , **lowerCamelCase , ):
'''simple docstring'''
_lowerCAmelCase = vocab_size
_lowerCAmelCase = []
self.cutoffs.extend(lowerCamelCase )
if proj_share_all_but_first:
_lowerCAmelCase = [False] + [True] * len(self.cutoffs )
else:
_lowerCAmelCase = [False] + [False] * len(self.cutoffs )
_lowerCAmelCase = d_model
_lowerCAmelCase = d_embed
_lowerCAmelCase = d_head
_lowerCAmelCase = d_inner
_lowerCAmelCase = div_val
_lowerCAmelCase = pre_lnorm
_lowerCAmelCase = n_layer
_lowerCAmelCase = n_head
_lowerCAmelCase = mem_len
_lowerCAmelCase = same_length
_lowerCAmelCase = attn_type
_lowerCAmelCase = clamp_len
_lowerCAmelCase = sample_softmax
_lowerCAmelCase = adaptive
_lowerCAmelCase = dropout
_lowerCAmelCase = dropatt
_lowerCAmelCase = untie_r
_lowerCAmelCase = init
_lowerCAmelCase = init_range
_lowerCAmelCase = proj_init_std
_lowerCAmelCase = init_std
_lowerCAmelCase = layer_norm_epsilon
super().__init__(eos_token_id=lowerCamelCase , **lowerCamelCase )
@property
def A__ (self ):
'''simple docstring'''
logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
def A__ (self , lowerCamelCase ):
'''simple docstring'''
raise NotImplementedError(
f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 317
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE : Optional[Any] = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : List[Any] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Tuple = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 317
|
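`_LazyModule` above defers every heavy import until an attribute is first touched. The underlying mechanism is module-level `__getattr__` (PEP 562); below is a self-contained sketch of that mechanism only, not the actual `_LazyModule` implementation, and the attribute mapping is made up:

import importlib
import sys

_LAZY_ATTRS = {"OrderedDict": "collections"}  # attribute -> owning module (hypothetical)

def __getattr__(name):  # PEP 562: called only for attributes missing from the module
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

print(getattr(sys.modules[__name__], "OrderedDict"))  # resolved on first access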
"""simple docstring"""
import math
def __UpperCAmelCase ( snake_case_ : int ) -> list[int]:
"""simple docstring"""
_lowerCAmelCase = []
_lowerCAmelCase = 2
_lowerCAmelCase = int(math.sqrt(snake_case_ ) ) # Size of every segment
_lowerCAmelCase = [True] * (end + 1)
_lowerCAmelCase = []
while start <= end:
if temp[start] is True:
in_prime.append(snake_case_ )
for i in range(start * start , end + 1 , snake_case_ ):
_lowerCAmelCase = False
start += 1
prime += in_prime
_lowerCAmelCase = end + 1
_lowerCAmelCase = min(2 * end , snake_case_ )
while low <= n:
_lowerCAmelCase = [True] * (high - low + 1)
for each in in_prime:
_lowerCAmelCase = math.floor(low / each ) * each
if t < low:
t += each
for j in range(snake_case_ , high + 1 , snake_case_ ):
_lowerCAmelCase = False
for j in range(len(snake_case_ ) ):
if temp[j] is True:
prime.append(j + low )
_lowerCAmelCase = high + 1
_lowerCAmelCase = min(high + end , snake_case_ )
return prime
print(sieve(1_0**6))
| 317
| 1
|
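The sieve above is segmented: it first sieves base primes up to sqrt(n), then marks composites window by window so memory stays O(sqrt(n)). Its variable names were destroyed by the style transform; a working reconstruction of the same algorithm:

import math

def segmented_sieve(n: int) -> list[int]:
    """Collect primes up to n in sqrt(n)-sized segments to bound memory use."""
    limit = int(math.sqrt(n)) + 1
    base = [True] * limit
    base_primes = []
    for i in range(2, limit):
        if base[i]:
            base_primes.append(i)
            for j in range(i * i, limit, i):
                base[j] = False
    primes = list(base_primes)
    low, high = limit, min(2 * limit, n)
    while low <= n:
        segment = [True] * (high - low + 1)
        for p in base_primes:
            start = max(p * p, (low + p - 1) // p * p)  # first multiple of p in [low, high]
            for multiple in range(start, high + 1, p):
                segment[multiple - low] = False
        primes.extend(i + low for i, flag in enumerate(segment) if flag)
        low, high = high + 1, min(high + limit, n)
    return primes

assert segmented_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]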
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Union[str, Any] = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
SCREAMING_SNAKE_CASE : List[Any] = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
def __UpperCAmelCase ( snake_case_ : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
_lowerCAmelCase = torch.load(snake_case_ , map_location="""cpu""" )
return sd
def __UpperCAmelCase ( snake_case_ : Optional[int] , snake_case_ : Union[str, Any] , snake_case_ : List[Any]=rename_keys_prefix ) -> Dict:
"""simple docstring"""
_lowerCAmelCase = OrderedDict()
_lowerCAmelCase = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
_lowerCAmelCase = key
for name_pair in rename_keys_prefix:
_lowerCAmelCase = new_key.replace(name_pair[0] , name_pair[1] )
_lowerCAmelCase = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`; it was added separately
_lowerCAmelCase = new_d["""cls.predictions.bias"""]
return new_d
@torch.no_grad()
def __UpperCAmelCase ( snake_case_ : int , snake_case_ : Union[str, Any] ) -> List[str]:
"""simple docstring"""
assert (
checkpoint_path.split("""/""" )[-1] in ACCEPTABLE_CHECKPOINTS
), F"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."""
# Get Config
if "pre" in checkpoint_path:
_lowerCAmelCase = """pretraining"""
if "vcr" in checkpoint_path:
_lowerCAmelCase = {"""visual_embedding_dim""": 512}
elif "vqa_advanced" in checkpoint_path:
_lowerCAmelCase = {"""visual_embedding_dim""": 2048}
elif "vqa" in checkpoint_path:
_lowerCAmelCase = {"""visual_embedding_dim""": 2048}
elif "nlvr" in checkpoint_path:
_lowerCAmelCase = {"""visual_embedding_dim""": 1024}
else:
raise NotImplementedError(F"""No implementation found for `{checkpoint_path}`.""" )
else:
if "vcr" in checkpoint_path:
_lowerCAmelCase = {"""visual_embedding_dim""": 512}
_lowerCAmelCase = """multichoice"""
elif "vqa_advanced" in checkpoint_path:
_lowerCAmelCase = {"""visual_embedding_dim""": 2048}
_lowerCAmelCase = """vqa_advanced"""
elif "vqa" in checkpoint_path:
_lowerCAmelCase = {"""visual_embedding_dim""": 2048, """num_labels""": 3129}
_lowerCAmelCase = """vqa"""
elif "nlvr" in checkpoint_path:
_lowerCAmelCase = {
"""visual_embedding_dim""": 1024,
"""num_labels""": 2,
}
_lowerCAmelCase = """nlvr"""
_lowerCAmelCase = VisualBertConfig(**snake_case_ )
# Load State Dict
_lowerCAmelCase = load_state_dict(snake_case_ )
_lowerCAmelCase = get_new_dict(snake_case_ , snake_case_ )
if model_type == "pretraining":
_lowerCAmelCase = VisualBertForPreTraining(snake_case_ )
elif model_type == "vqa":
_lowerCAmelCase = VisualBertForQuestionAnswering(snake_case_ )
elif model_type == "nlvr":
_lowerCAmelCase = VisualBertForVisualReasoning(snake_case_ )
elif model_type == "multichoice":
_lowerCAmelCase = VisualBertForMultipleChoice(snake_case_ )
model.load_state_dict(snake_case_ )
# Save Checkpoints
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
model.save_pretrained(snake_case_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''')
SCREAMING_SNAKE_CASE : Any = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 317
|
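`get_new_dict` above rewrites checkpoint keys by applying ordered (old, new) substring pairs to every key. The remapping pattern in a minimal sketch; the rename pairs below are invented for illustration:

from collections import OrderedDict

RENAME_PAIRS = [("backbone.", "encoder."), ("head.", "classifier.")]  # hypothetical

def rename_state_dict(state_dict: dict) -> "OrderedDict[str, object]":
    """Apply each (old, new) substring replacement to every key, preserving order."""
    renamed = OrderedDict()
    for key, value in state_dict.items():
        new_key = key
        for old, new in RENAME_PAIRS:
            new_key = new_key.replace(old, new)
        renamed[new_key] = value
    return renamed

print(rename_state_dict({"backbone.layer1.weight": 0, "head.bias": 1}))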
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
SCREAMING_SNAKE_CASE : Any = (7_2_0, 1_2_8_0) # Height, Width
SCREAMING_SNAKE_CASE : List[str] = (0.4, 0.6) # if height or width lower than this scale, drop it.
SCREAMING_SNAKE_CASE : List[Any] = 1 / 1_0_0
SCREAMING_SNAKE_CASE : Optional[Any] = ''''''
SCREAMING_SNAKE_CASE : Dict = ''''''
SCREAMING_SNAKE_CASE : List[Any] = ''''''
SCREAMING_SNAKE_CASE : Dict = 2_5_0
def __UpperCAmelCase ( ) -> None:
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = get_dataset(snake_case_ , snake_case_ )
for index in range(snake_case_ ):
_lowerCAmelCase = random.sample(range(len(snake_case_ ) ) , 4 )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = update_image_and_anno(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , filter_scale=snake_case_ , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
_lowerCAmelCase = random_chars(32 )
_lowerCAmelCase = path.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
_lowerCAmelCase = F"""{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"""
cva.imwrite(F"""{file_root}.jpg""" , snake_case_ , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F"""Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}""" )
_lowerCAmelCase = []
for anno in new_annos:
_lowerCAmelCase = anno[3] - anno[1]
_lowerCAmelCase = anno[4] - anno[2]
_lowerCAmelCase = anno[1] + width / 2
_lowerCAmelCase = anno[2] + height / 2
_lowerCAmelCase = F"""{anno[0]} {x_center} {y_center} {width} {height}"""
annos_list.append(snake_case_ )
with open(F"""{file_root}.txt""" , """w""" ) as outfile:
outfile.write("""\n""".join(annos_list ) )
def __UpperCAmelCase ( snake_case_ : str , snake_case_ : str ) -> tuple[list, list]:
"""simple docstring"""
_lowerCAmelCase = []
_lowerCAmelCase = []
for label_file in glob.glob(os.path.join(snake_case_ , """*.txt""" ) ):
_lowerCAmelCase = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
with open(snake_case_ ) as in_file:
_lowerCAmelCase = in_file.readlines()
_lowerCAmelCase = os.path.join(snake_case_ , F"""{label_name}.jpg""" )
_lowerCAmelCase = []
for obj_list in obj_lists:
_lowerCAmelCase = obj_list.rstrip("""\n""" ).split(""" """ )
_lowerCAmelCase = float(obj[1] ) - float(obj[3] ) / 2
_lowerCAmelCase = float(obj[2] ) - float(obj[4] ) / 2
_lowerCAmelCase = float(obj[1] ) + float(obj[3] ) / 2
_lowerCAmelCase = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(snake_case_ )
labels.append(snake_case_ )
return img_paths, labels
def __UpperCAmelCase ( snake_case_ : list , snake_case_ : list , snake_case_ : list[int] , snake_case_ : tuple[int, int] , snake_case_ : tuple[float, float] , snake_case_ : float = 0.0 , ) -> tuple[list, list, str]:
"""simple docstring"""
_lowerCAmelCase = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
_lowerCAmelCase = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
_lowerCAmelCase = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
_lowerCAmelCase = int(scale_x * output_size[1] )
_lowerCAmelCase = int(scale_y * output_size[0] )
_lowerCAmelCase = []
_lowerCAmelCase = []
for i, index in enumerate(snake_case_ ):
_lowerCAmelCase = all_img_list[index]
path_list.append(snake_case_ )
_lowerCAmelCase = all_annos[index]
_lowerCAmelCase = cva.imread(snake_case_ )
if i == 0: # top-left
_lowerCAmelCase = cva.resize(snake_case_ , (divid_point_x, divid_point_y) )
_lowerCAmelCase = img
for bbox in img_annos:
_lowerCAmelCase = bbox[1] * scale_x
_lowerCAmelCase = bbox[2] * scale_y
_lowerCAmelCase = bbox[3] * scale_x
_lowerCAmelCase = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
_lowerCAmelCase = cva.resize(snake_case_ , (output_size[1] - divid_point_x, divid_point_y) )
_lowerCAmelCase = img
for bbox in img_annos:
_lowerCAmelCase = scale_x + bbox[1] * (1 - scale_x)
_lowerCAmelCase = bbox[2] * scale_y
_lowerCAmelCase = scale_x + bbox[3] * (1 - scale_x)
_lowerCAmelCase = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
_lowerCAmelCase = cva.resize(snake_case_ , (divid_point_x, output_size[0] - divid_point_y) )
_lowerCAmelCase = img
for bbox in img_annos:
_lowerCAmelCase = bbox[1] * scale_x
_lowerCAmelCase = scale_y + bbox[2] * (1 - scale_y)
_lowerCAmelCase = bbox[3] * scale_x
_lowerCAmelCase = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
_lowerCAmelCase = cva.resize(
snake_case_ , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
_lowerCAmelCase = img
for bbox in img_annos:
_lowerCAmelCase = scale_x + bbox[1] * (1 - scale_x)
_lowerCAmelCase = scale_y + bbox[2] * (1 - scale_y)
_lowerCAmelCase = scale_x + bbox[3] * (1 - scale_x)
_lowerCAmelCase = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding boxes smaller than the filter scale
if filter_scale > 0:
_lowerCAmelCase = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def __UpperCAmelCase ( snake_case_ : int ) -> str:
"""simple docstring"""
assert number_char > 1, "The number of characters should be greater than 1"
_lowerCAmelCase = ascii_lowercase + digits
return "".join(random.choice(snake_case_ ) for _ in range(snake_case_ ) )
if __name__ == "__main__":
main()
print('''DONE ✅''')
| 317
| 1
|
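The mosaic script above keeps converting YOLO boxes (normalized center x/y, width, height) to corner form for cropping and back to center form when the label file is written. The two conversions in isolation, with test values chosen to be exact in binary floating point:

def yolo_to_corners(xc: float, yc: float, w: float, h: float) -> tuple:
    """Convert a normalized YOLO box (center x/y, width, height) to corner form."""
    return (xc - w / 2, yc - h / 2, xc + w / 2, yc + h / 2)

def corners_to_yolo(xmin: float, ymin: float, xmax: float, ymax: float) -> tuple:
    """Inverse conversion: corners back to center/size, as done when writing labels."""
    w, h = xmax - xmin, ymax - ymin
    return (xmin + w / 2, ymin + h / 2, w, h)

assert corners_to_yolo(*yolo_to_corners(0.5, 0.5, 0.25, 0.5)) == (0.5, 0.5, 0.25, 0.5)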
"""simple docstring"""
import requests
SCREAMING_SNAKE_CASE : Dict = '''''' # <-- Put your OpenWeatherMap appid here!
SCREAMING_SNAKE_CASE : int = '''https://api.openweathermap.org/data/2.5/'''
def __UpperCAmelCase ( snake_case_ : str = "Chicago" , snake_case_ : str = APPID ) -> dict:
"""simple docstring"""
return requests.get(URL_BASE + """weather""" , params=locals() ).json()
def __UpperCAmelCase ( snake_case_ : str = "Kolkata, India" , snake_case_ : str = APPID ) -> dict:
"""simple docstring"""
return requests.get(URL_BASE + """forecast""" , params=locals() ).json()
def __UpperCAmelCase ( snake_case_ : float = 5_5.6_8 , snake_case_ : float = 1_2.5_7 , snake_case_ : str = APPID ) -> dict:
"""simple docstring"""
return requests.get(URL_BASE + """onecall""" , params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
SCREAMING_SNAKE_CASE : Optional[Any] = input('''Enter a location:''').strip()
if location:
pprint(current_weather(location))
else:
break
| 317
|
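Passing `locals()` as the query dict works above because, at function entry, each function's locals are exactly its keyword arguments, which double as the query parameters. The same trick in isolation (the parameter names here are hypothetical):

def build_query(q: str = "Chicago", appid: str = "<api-key>") -> dict:
    """At function entry, locals() is exactly the keyword arguments."""
    return dict(locals())

print(build_query(q="Kolkata"))  # {'q': 'Kolkata', 'appid': '<api-key>'}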
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
SCREAMING_SNAKE_CASE : Dict = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def __UpperCAmelCase ( snake_case_ : Optional[int] ) -> List[str]:
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(snake_case_ )
def __UpperCAmelCase ( snake_case_ : Union[str, Any] ) -> int:
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_terminal_summary_main
_lowerCAmelCase = terminalreporter.config.getoption("""--make-reports""" )
if make_reports:
pytest_terminal_summary_main(snake_case_ , id=snake_case_ )
| 317
| 1
|
"""simple docstring"""
def __UpperCAmelCase ( snake_case_ : int = 1000000 ) -> int:
"""simple docstring"""
_lowerCAmelCase = limit + 1
_lowerCAmelCase = [0] * limit
for first_term in range(1 , snake_case_ ):
for n in range(snake_case_ , snake_case_ , snake_case_ ):
_lowerCAmelCase = first_term + n / first_term
if common_difference % 4: # d must be divisible by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
frequency[n] += 1 # so z > 0 and a > d; also a < 4d
_lowerCAmelCase = sum(1 for x in frequency[1:limit] if x == 10 )
return count
if __name__ == "__main__":
print(F'{solution() = }')
| 317
|
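The loop above rests on an algebraic identity: writing the progression as x, y, z = a + d, a, a - d gives x^2 - y^2 - z^2 = a * (4d - a) = n, so 4d = a + n/a must divide evenly by 4, and positivity of z and n forces a > d and a < 4d, which is exactly the filter applied above. A quick numeric check of the identity:

# x, y, z = a + d, a, a - d  =>  x**2 - y**2 - z**2 == a * (4 * d - a)
a, d = 10, 4
x, y, z = a + d, a, a - d
assert x**2 - y**2 - z**2 == a * (4 * d - a) == 60  # 196 - 100 - 36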
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
SCREAMING_SNAKE_CASE : Optional[Any] = {
'''Acehnese Arabic''': '''ace_Arab''',
'''Acehnese Latin''': '''ace_Latn''',
'''Mesopotamian Arabic''': '''acm_Arab''',
'''Ta\'izzi-Adeni Arabic''': '''acq_Arab''',
'''Tunisian Arabic''': '''aeb_Arab''',
'''Afrikaans''': '''afr_Latn''',
'''South Levantine Arabic''': '''ajp_Arab''',
'''Akan''': '''aka_Latn''',
'''Amharic''': '''amh_Ethi''',
'''North Levantine Arabic''': '''apc_Arab''',
'''Modern Standard Arabic''': '''arb_Arab''',
'''Modern Standard Arabic Romanized''': '''arb_Latn''',
'''Najdi Arabic''': '''ars_Arab''',
'''Moroccan Arabic''': '''ary_Arab''',
'''Egyptian Arabic''': '''arz_Arab''',
'''Assamese''': '''asm_Beng''',
'''Asturian''': '''ast_Latn''',
'''Awadhi''': '''awa_Deva''',
'''Central Aymara''': '''ayr_Latn''',
'''South Azerbaijani''': '''azb_Arab''',
'''North Azerbaijani''': '''azj_Latn''',
'''Bashkir''': '''bak_Cyrl''',
'''Bambara''': '''bam_Latn''',
'''Balinese''': '''ban_Latn''',
'''Belarusian''': '''bel_Cyrl''',
'''Bemba''': '''bem_Latn''',
'''Bengali''': '''ben_Beng''',
'''Bhojpuri''': '''bho_Deva''',
'''Banjar Arabic''': '''bjn_Arab''',
'''Banjar Latin''': '''bjn_Latn''',
'''Standard Tibetan''': '''bod_Tibt''',
'''Bosnian''': '''bos_Latn''',
'''Buginese''': '''bug_Latn''',
'''Bulgarian''': '''bul_Cyrl''',
'''Catalan''': '''cat_Latn''',
'''Cebuano''': '''ceb_Latn''',
'''Czech''': '''ces_Latn''',
'''Chokwe''': '''cjk_Latn''',
'''Central Kurdish''': '''ckb_Arab''',
'''Crimean Tatar''': '''crh_Latn''',
'''Welsh''': '''cym_Latn''',
'''Danish''': '''dan_Latn''',
'''German''': '''deu_Latn''',
'''Southwestern Dinka''': '''dik_Latn''',
'''Dyula''': '''dyu_Latn''',
'''Dzongkha''': '''dzo_Tibt''',
'''Greek''': '''ell_Grek''',
'''English''': '''eng_Latn''',
'''Esperanto''': '''epo_Latn''',
'''Estonian''': '''est_Latn''',
'''Basque''': '''eus_Latn''',
'''Ewe''': '''ewe_Latn''',
'''Faroese''': '''fao_Latn''',
'''Fijian''': '''fij_Latn''',
'''Finnish''': '''fin_Latn''',
'''Fon''': '''fon_Latn''',
'''French''': '''fra_Latn''',
'''Friulian''': '''fur_Latn''',
'''Nigerian Fulfulde''': '''fuv_Latn''',
'''Scottish Gaelic''': '''gla_Latn''',
'''Irish''': '''gle_Latn''',
'''Galician''': '''glg_Latn''',
'''Guarani''': '''grn_Latn''',
'''Gujarati''': '''guj_Gujr''',
'''Haitian Creole''': '''hat_Latn''',
'''Hausa''': '''hau_Latn''',
'''Hebrew''': '''heb_Hebr''',
'''Hindi''': '''hin_Deva''',
'''Chhattisgarhi''': '''hne_Deva''',
'''Croatian''': '''hrv_Latn''',
'''Hungarian''': '''hun_Latn''',
'''Armenian''': '''hye_Armn''',
'''Igbo''': '''ibo_Latn''',
'''Ilocano''': '''ilo_Latn''',
'''Indonesian''': '''ind_Latn''',
'''Icelandic''': '''isl_Latn''',
'''Italian''': '''ita_Latn''',
'''Javanese''': '''jav_Latn''',
'''Japanese''': '''jpn_Jpan''',
'''Kabyle''': '''kab_Latn''',
'''Jingpho''': '''kac_Latn''',
'''Kamba''': '''kam_Latn''',
'''Kannada''': '''kan_Knda''',
'''Kashmiri Arabic''': '''kas_Arab''',
'''Kashmiri Devanagari''': '''kas_Deva''',
'''Georgian''': '''kat_Geor''',
'''Central Kanuri Arabic''': '''knc_Arab''',
'''Central Kanuri Latin''': '''knc_Latn''',
'''Kazakh''': '''kaz_Cyrl''',
'''Kabiyè''': '''kbp_Latn''',
'''Kabuverdianu''': '''kea_Latn''',
'''Khmer''': '''khm_Khmr''',
'''Kikuyu''': '''kik_Latn''',
'''Kinyarwanda''': '''kin_Latn''',
'''Kyrgyz''': '''kir_Cyrl''',
'''Kimbundu''': '''kmb_Latn''',
'''Northern Kurdish''': '''kmr_Latn''',
'''Kikongo''': '''kon_Latn''',
'''Korean''': '''kor_Hang''',
'''Lao''': '''lao_Laoo''',
'''Ligurian''': '''lij_Latn''',
'''Limburgish''': '''lim_Latn''',
'''Lingala''': '''lin_Latn''',
'''Lithuanian''': '''lit_Latn''',
'''Lombard''': '''lmo_Latn''',
'''Latgalian''': '''ltg_Latn''',
'''Luxembourgish''': '''ltz_Latn''',
'''Luba-Kasai''': '''lua_Latn''',
'''Ganda''': '''lug_Latn''',
'''Luo''': '''luo_Latn''',
'''Mizo''': '''lus_Latn''',
'''Standard Latvian''': '''lvs_Latn''',
'''Magahi''': '''mag_Deva''',
'''Maithili''': '''mai_Deva''',
'''Malayalam''': '''mal_Mlym''',
'''Marathi''': '''mar_Deva''',
'''Minangkabau Arabic ''': '''min_Arab''',
'''Minangkabau Latin''': '''min_Latn''',
'''Macedonian''': '''mkd_Cyrl''',
'''Plateau Malagasy''': '''plt_Latn''',
'''Maltese''': '''mlt_Latn''',
'''Meitei Bengali''': '''mni_Beng''',
'''Halh Mongolian''': '''khk_Cyrl''',
'''Mossi''': '''mos_Latn''',
'''Maori''': '''mri_Latn''',
'''Burmese''': '''mya_Mymr''',
'''Dutch''': '''nld_Latn''',
'''Norwegian Nynorsk''': '''nno_Latn''',
'''Norwegian Bokmål''': '''nob_Latn''',
'''Nepali''': '''npi_Deva''',
'''Northern Sotho''': '''nso_Latn''',
'''Nuer''': '''nus_Latn''',
'''Nyanja''': '''nya_Latn''',
'''Occitan''': '''oci_Latn''',
'''West Central Oromo''': '''gaz_Latn''',
'''Odia''': '''ory_Orya''',
'''Pangasinan''': '''pag_Latn''',
'''Eastern Panjabi''': '''pan_Guru''',
'''Papiamento''': '''pap_Latn''',
'''Western Persian''': '''pes_Arab''',
'''Polish''': '''pol_Latn''',
'''Portuguese''': '''por_Latn''',
'''Dari''': '''prs_Arab''',
'''Southern Pashto''': '''pbt_Arab''',
'''Ayacucho Quechua''': '''quy_Latn''',
'''Romanian''': '''ron_Latn''',
'''Rundi''': '''run_Latn''',
'''Russian''': '''rus_Cyrl''',
'''Sango''': '''sag_Latn''',
'''Sanskrit''': '''san_Deva''',
'''Santali''': '''sat_Olck''',
'''Sicilian''': '''scn_Latn''',
'''Shan''': '''shn_Mymr''',
'''Sinhala''': '''sin_Sinh''',
'''Slovak''': '''slk_Latn''',
'''Slovenian''': '''slv_Latn''',
'''Samoan''': '''smo_Latn''',
'''Shona''': '''sna_Latn''',
'''Sindhi''': '''snd_Arab''',
'''Somali''': '''som_Latn''',
'''Southern Sotho''': '''sot_Latn''',
'''Spanish''': '''spa_Latn''',
'''Tosk Albanian''': '''als_Latn''',
'''Sardinian''': '''srd_Latn''',
'''Serbian''': '''srp_Cyrl''',
'''Swati''': '''ssw_Latn''',
'''Sundanese''': '''sun_Latn''',
'''Swedish''': '''swe_Latn''',
'''Swahili''': '''swh_Latn''',
'''Silesian''': '''szl_Latn''',
'''Tamil''': '''tam_Taml''',
'''Tatar''': '''tat_Cyrl''',
'''Telugu''': '''tel_Telu''',
'''Tajik''': '''tgk_Cyrl''',
'''Tagalog''': '''tgl_Latn''',
'''Thai''': '''tha_Thai''',
'''Tigrinya''': '''tir_Ethi''',
'''Tamasheq Latin''': '''taq_Latn''',
'''Tamasheq Tifinagh''': '''taq_Tfng''',
'''Tok Pisin''': '''tpi_Latn''',
'''Tswana''': '''tsn_Latn''',
'''Tsonga''': '''tso_Latn''',
'''Turkmen''': '''tuk_Latn''',
'''Tumbuka''': '''tum_Latn''',
'''Turkish''': '''tur_Latn''',
'''Twi''': '''twi_Latn''',
'''Central Atlas Tamazight''': '''tzm_Tfng''',
'''Uyghur''': '''uig_Arab''',
'''Ukrainian''': '''ukr_Cyrl''',
'''Umbundu''': '''umb_Latn''',
'''Urdu''': '''urd_Arab''',
'''Northern Uzbek''': '''uzn_Latn''',
'''Venetian''': '''vec_Latn''',
'''Vietnamese''': '''vie_Latn''',
'''Waray''': '''war_Latn''',
'''Wolof''': '''wol_Latn''',
'''Xhosa''': '''xho_Latn''',
'''Eastern Yiddish''': '''ydd_Hebr''',
'''Yoruba''': '''yor_Latn''',
'''Yue Chinese''': '''yue_Hant''',
'''Chinese Simplified''': '''zho_Hans''',
'''Chinese Traditional''': '''zho_Hant''',
'''Standard Malay''': '''zsm_Latn''',
'''Zulu''': '''zul_Latn''',
}
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = 'facebook/nllb-200-distilled-600M'
__UpperCamelCase = (
'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
'which should be the language of the desired output. Both `src_lang` and `tgt_lang` are written in '
'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
)
__UpperCamelCase = 'translator'
__UpperCamelCase = AutoTokenizer
__UpperCamelCase = AutoModelForSeqaSeqLM
__UpperCamelCase = LANGUAGE_CODES
__UpperCamelCase = ['text', 'text', 'text']
__UpperCamelCase = ['text']
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if src_lang not in self.lang_to_code:
raise ValueError(f"""{src_lang} is not a supported language.""" )
if tgt_lang not in self.lang_to_code:
raise ValueError(f"""{tgt_lang} is not a supported language.""" )
_lowerCAmelCase = self.lang_to_code[src_lang]
_lowerCAmelCase = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
lowerCamelCase , return_tensors="""pt""" , src_lang=lowerCamelCase , tgt_lang=lowerCamelCase )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
return self.model.generate(**lowerCamelCase )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=lowerCamelCase )
| 317
| 1
|
"""simple docstring"""
SCREAMING_SNAKE_CASE : dict[str, float] = {
"km/h": 1.0,
"m/s": 3.6,
"mph": 1.6_0_9_3_4_4,
"knot": 1.8_5_2,
}
SCREAMING_SNAKE_CASE : dict[str, float] = {
"km/h": 1.0,
"m/s": 0.2_7_7_7_7_7_7_7_8,
"mph": 0.6_2_1_3_7_1_1_9_2,
"knot": 0.5_3_9_9_5_6_8_0_3,
}
def __UpperCAmelCase ( snake_case_ : float , snake_case_ : str , snake_case_ : str ) -> float:
"""simple docstring"""
if unit_from not in speed_chart or unit_to not in speed_chart_inverse:
_lowerCAmelCase = (
F"""Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"""
F"""Valid values are: {", ".join(snake_case_ )}"""
)
raise ValueError(snake_case_ )
return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 317
|
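The converter above pivots every conversion through km/h, and its second chart is just rounded reciprocals of the first. A sketch that derives the inverse chart instead of hard-coding it:

def convert(value: float, unit_from: str, unit_to: str) -> float:
    """Convert by pivoting through km/h: value -> km/h -> target unit."""
    to_kmh = {"km/h": 1.0, "m/s": 3.6, "mph": 1.609344, "knot": 1.852}
    from_kmh = {unit: 1.0 / factor for unit, factor in to_kmh.items()}
    return round(value * to_kmh[unit_from] * from_kmh[unit_to], 3)

assert convert(100, "km/h", "m/s") == 27.778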
"""simple docstring"""
from math import isqrt
def __UpperCAmelCase ( snake_case_ : int ) -> list[int]:
"""simple docstring"""
_lowerCAmelCase = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , snake_case_ , snake_case_ ):
_lowerCAmelCase = False
return [i for i in range(2 , snake_case_ ) if is_prime[i]]
def __UpperCAmelCase ( snake_case_ : int = 10**8 ) -> int:
"""simple docstring"""
_lowerCAmelCase = calculate_prime_numbers(max_number // 2 )
_lowerCAmelCase = 0
_lowerCAmelCase = 0
_lowerCAmelCase = len(snake_case_ ) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
if __name__ == "__main__":
print(F'{solution() = }')
| 317
| 1
|
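The counting loop above is a two-pointer scan over the sorted prime list: for each left index, shrink right until the product falls below the bound; every pair (left, left..right) then qualifies at once, giving a linear-time count once the list is sorted. The same scan in isolation:

def count_products_below(values: list, limit: int) -> int:
    """Count pairs (i, j) with i <= j and values[i] * values[j] < limit.
    `values` must be sorted ascending; mirrors the shrinking-window scan above."""
    count, left, right = 0, 0, len(values) - 1
    while left <= right:
        while left <= right and values[left] * values[right] >= limit:
            right -= 1
        count += right - left + 1
        left += 1
    return count

assert count_products_below([2, 3, 5, 7], 15) == 5  # 2*2, 2*3, 2*5, 2*7, 3*3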
"""simple docstring"""
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True)
def __UpperCAmelCase ( snake_case_ : Dict ) -> Optional[Any]:
"""simple docstring"""
if hor == 128:
_lowerCAmelCase = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
_lowerCAmelCase = (32, 128, 256)
_lowerCAmelCase = ("""UpResnetBlock1D""", """UpResnetBlock1D""")
elif hor == 32:
_lowerCAmelCase = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
_lowerCAmelCase = (32, 64, 128, 256)
_lowerCAmelCase = ("""UpResnetBlock1D""", """UpResnetBlock1D""", """UpResnetBlock1D""")
_lowerCAmelCase = torch.load(F"""/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch""" )
_lowerCAmelCase = model.state_dict()
_lowerCAmelCase = {
"""down_block_types""": down_block_types,
"""block_out_channels""": block_out_channels,
"""up_block_types""": up_block_types,
"""layers_per_block""": 1,
"""use_timestep_embedding""": True,
"""out_block_type""": """OutConv1DBlock""",
"""norm_num_groups""": 8,
"""downsample_each_block""": False,
"""in_channels""": 14,
"""out_channels""": 14,
"""extra_in_channels""": 0,
"""time_embedding_type""": """positional""",
"""flip_sin_to_cos""": False,
"""freq_shift""": 1,
"""sample_size""": 65536,
"""mid_block_type""": """MidResTemporalBlock1D""",
"""act_fn""": """mish""",
}
_lowerCAmelCase = UNetaDModel(**snake_case_ )
print(F"""length of state dict: {len(state_dict.keys() )}""" )
print(F"""length of value function dict: {len(hf_value_function.state_dict().keys() )}""" )
_lowerCAmelCase = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
_lowerCAmelCase = state_dict.pop(snake_case_ )
hf_value_function.load_state_dict(snake_case_ )
torch.save(hf_value_function.state_dict() , F"""hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin""" )
with open(F"""hub/hopper-medium-v2/unet/hor{hor}/config.json""" , """w""" ) as f:
json.dump(snake_case_ , snake_case_ )
def __UpperCAmelCase ( ) -> str:
"""simple docstring"""
_lowerCAmelCase = {
"""in_channels""": 14,
"""down_block_types""": ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D"""),
"""up_block_types""": (),
"""out_block_type""": """ValueFunction""",
"""mid_block_type""": """ValueFunctionMidBlock1D""",
"""block_out_channels""": (32, 64, 128, 256),
"""layers_per_block""": 1,
"""downsample_each_block""": True,
"""sample_size""": 65536,
"""out_channels""": 14,
"""extra_in_channels""": 0,
"""time_embedding_type""": """positional""",
"""use_timestep_embedding""": True,
"""flip_sin_to_cos""": False,
"""freq_shift""": 1,
"""norm_num_groups""": 8,
"""act_fn""": """mish""",
}
_lowerCAmelCase = torch.load("""/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch""" )
_lowerCAmelCase = model
_lowerCAmelCase = UNetaDModel(**snake_case_ )
print(F"""length of state dict: {len(state_dict.keys() )}""" )
print(F"""length of value function dict: {len(hf_value_function.state_dict().keys() )}""" )
_lowerCAmelCase = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
_lowerCAmelCase = state_dict.pop(snake_case_ )
hf_value_function.load_state_dict(snake_case_ )
torch.save(hf_value_function.state_dict() , """hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin""" )
with open("""hub/hopper-medium-v2/value_function/config.json""" , """w""" ) as f:
json.dump(snake_case_ , snake_case_ )
if __name__ == "__main__":
unet(3_2)
# unet(128)
value_function()
| 317
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = (
        'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image. '
        'It takes two arguments named `image` which should be the original image, and `label` which should be a text '
        'describing the elements that should be identified in the segmentation mask. The tool returns the mask.'
)
__UpperCamelCase = 'CIDAS/clipseg-rd64-refined'
__UpperCamelCase = 'image_segmenter'
__UpperCamelCase = CLIPSegForImageSegmentation
__UpperCamelCase = ['image', 'text']
__UpperCamelCase = ['image']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""vision"""] )
super().__init__(*lowerCamelCase , **lowerCamelCase )
def A__ (self , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
return self.pre_processor(text=[label] , images=[image] , padding=lowerCamelCase , return_tensors="""pt""" )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
with torch.no_grad():
_lowerCAmelCase = self.model(**lowerCamelCase ).logits
return logits
def A__ (self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = outputs.cpu().detach().numpy()
_lowerCAmelCase = 0
_lowerCAmelCase = 1
return Image.fromarray((array * 255).astype(np.uinta ) )
| 317
| 1
|
"""simple docstring"""
def __UpperCAmelCase ( snake_case_ : int ) -> int:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ):
raise TypeError("""Input value must be an 'int' type""" )
_lowerCAmelCase = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 317
|
"""simple docstring"""
from __future__ import annotations
import queue
class __lowerCamelCase :
def __init__(self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = data
_lowerCAmelCase = None
_lowerCAmelCase = None
def __UpperCAmelCase ( ) -> TreeNode:
"""simple docstring"""
print("""\n********Press N to stop entering at any point of time********\n""" )
_lowerCAmelCase = input("""Enter the value of the root node: """ ).strip().lower()
_lowerCAmelCase = queue.Queue()
_lowerCAmelCase = TreeNode(int(snake_case_ ) )
q.put(snake_case_ )
while not q.empty():
_lowerCAmelCase = q.get()
_lowerCAmelCase = F"""Enter the left node of {node_found.data}: """
_lowerCAmelCase = input(snake_case_ ).strip().lower() or """n"""
if check == "n":
return tree_node
_lowerCAmelCase = TreeNode(int(snake_case_ ) )
_lowerCAmelCase = left_node
q.put(snake_case_ )
_lowerCAmelCase = F"""Enter the right node of {node_found.data}: """
_lowerCAmelCase = input(snake_case_ ).strip().lower() or """n"""
if check == "n":
return tree_node
_lowerCAmelCase = TreeNode(int(snake_case_ ) )
_lowerCAmelCase = right_node
q.put(snake_case_ )
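    # Unreachable in practice: every path through the loop returns once the
    # user enters "n"; the bare raise only satisfies the TreeNode return type.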
raise
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
print(node.data , end=""",""" )
pre_order(node.left )
pre_order(node.right )
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
in_order(node.left )
print(node.data , end=""",""" )
in_order(node.right )
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=""",""" )
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
_lowerCAmelCase = queue.Queue()
q.put(snake_case_ )
while not q.empty():
_lowerCAmelCase = q.get()
print(node_dequeued.data , end=""",""" )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
_lowerCAmelCase = queue.Queue()
q.put(snake_case_ )
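    # Process one full level per pass: drain the queue, print its nodes on a
    # single line, then re-queue the collected children for the next level.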
while not q.empty():
_lowerCAmelCase = []
while not q.empty():
_lowerCAmelCase = q.get()
print(node_dequeued.data , end=""",""" )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(snake_case_ )
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
_lowerCAmelCase = []
_lowerCAmelCase = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=""",""" )
stack.append(snake_case_ )
_lowerCAmelCase = n.left
# end of while means current node doesn't have left child
_lowerCAmelCase = stack.pop()
# start to traverse its right child
_lowerCAmelCase = n.right
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
_lowerCAmelCase = []
_lowerCAmelCase = node
while n or stack:
while n:
stack.append(snake_case_ )
_lowerCAmelCase = n.left
_lowerCAmelCase = stack.pop()
print(n.data , end=""",""" )
_lowerCAmelCase = n.right
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
_lowerCAmelCase , _lowerCAmelCase = [], []
_lowerCAmelCase = node
stacka.append(snake_case_ )
while stacka: # to find the reversed order of post order, store it in stack2
_lowerCAmelCase = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(snake_case_ )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end=""",""" )
def __UpperCAmelCase ( snake_case_ : str = "" , snake_case_ : int=50 , snake_case_ : Dict="*" ) -> str:
"""simple docstring"""
if not s:
return "\n" + width * char
_lowerCAmelCase , _lowerCAmelCase = divmod(width - len(snake_case_ ) - 2 , 2 )
return F"""{left * char} {s} {(left + extra) * char}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('''Binary Tree Traversals'''))
SCREAMING_SNAKE_CASE : TreeNode = build_tree()
print(prompt('''Pre Order Traversal'''))
pre_order(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal'''))
in_order(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal'''))
post_order(node)
print(prompt() + '''\n''')
print(prompt('''Level Order Traversal'''))
level_order(node)
print(prompt() + '''\n''')
print(prompt('''Actual Level Order Traversal'''))
level_order_actual(node)
print('''*''' * 5_0 + '''\n''')
print(prompt('''Pre Order Traversal - Iteration Version'''))
pre_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal - Iteration Version'''))
in_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal - Iteration Version'''))
post_order_iter(node)
print(prompt())
| 317
| 1
|
"""simple docstring"""
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class __lowerCamelCase ( __lowercase ):
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = SMALL_MODEL_IDENTIFIER
_lowerCAmelCase = """pt"""
_lowerCAmelCase = """tf"""
def A__ (self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(lowerCamelCase )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = TFAutoModel.from_pretrained(self.test_model , from_pt=lowerCamelCase )
model_tf.save_pretrained(lowerCamelCase )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = """mock_framework"""
# Framework provided - return whatever the user provides
_lowerCAmelCase = FeaturesManager.determine_framework(self.test_model , lowerCamelCase )
self.assertEqual(lowerCamelCase , lowerCamelCase )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(lowerCamelCase )
_lowerCAmelCase = FeaturesManager.determine_framework(lowerCamelCase , lowerCamelCase )
self.assertEqual(lowerCamelCase , lowerCamelCase )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(lowerCamelCase )
_lowerCAmelCase = FeaturesManager.determine_framework(lowerCamelCase , lowerCamelCase )
self.assertEqual(lowerCamelCase , lowerCamelCase )
def A__ (self ):
'''simple docstring'''
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(lowerCamelCase )
_lowerCAmelCase = FeaturesManager.determine_framework(lowerCamelCase )
self.assertEqual(lowerCamelCase , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(lowerCamelCase )
_lowerCAmelCase = FeaturesManager.determine_framework(lowerCamelCase )
self.assertEqual(lowerCamelCase , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(lowerCamelCase ):
_lowerCAmelCase = FeaturesManager.determine_framework(lowerCamelCase )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = MagicMock(return_value=lowerCamelCase )
with patch("""transformers.onnx.features.is_tf_available""" , lowerCamelCase ):
_lowerCAmelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(lowerCamelCase , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
_lowerCAmelCase = MagicMock(return_value=lowerCamelCase )
with patch("""transformers.onnx.features.is_torch_available""" , lowerCamelCase ):
_lowerCAmelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(lowerCamelCase , self.framework_tf )
# Both in environment -> use PyTorch
_lowerCAmelCase = MagicMock(return_value=lowerCamelCase )
_lowerCAmelCase = MagicMock(return_value=lowerCamelCase )
with patch("""transformers.onnx.features.is_tf_available""" , lowerCamelCase ), patch(
"""transformers.onnx.features.is_torch_available""" , lowerCamelCase ):
_lowerCAmelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(lowerCamelCase , self.framework_pt )
# Both not in environment -> raise error
_lowerCAmelCase = MagicMock(return_value=lowerCamelCase )
_lowerCAmelCase = MagicMock(return_value=lowerCamelCase )
with patch("""transformers.onnx.features.is_tf_available""" , lowerCamelCase ), patch(
"""transformers.onnx.features.is_torch_available""" , lowerCamelCase ):
with self.assertRaises(lowerCamelCase ):
_lowerCAmelCase = FeaturesManager.determine_framework(self.test_model )
| 317
|
"""simple docstring"""
from __future__ import annotations
class __lowerCamelCase :
def __init__(self , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase = text, pattern
_lowerCAmelCase , _lowerCAmelCase = len(lowerCamelCase ), len(lowerCamelCase )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def A__ (self , lowerCamelCase ):
'''simple docstring'''
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def A__ (self ):
'''simple docstring'''
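        # Try every alignment of the pattern against the text: a full match
        # records its position, otherwise the bad-character shift is computed.
        # (The reassignment flagged by the lgtm suppression below rebinds the
        # for-loop index, which does not actually skip alignments.)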
_lowerCAmelCase = []
for i in range(self.textLen - self.patLen + 1 ):
_lowerCAmelCase = self.mismatch_in_text(lowerCamelCase )
if mismatch_index == -1:
positions.append(lowerCamelCase )
else:
_lowerCAmelCase = self.match_in_pattern(self.text[mismatch_index] )
_lowerCAmelCase = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
SCREAMING_SNAKE_CASE : Any = '''ABAABA'''
SCREAMING_SNAKE_CASE : Optional[int] = '''AB'''
SCREAMING_SNAKE_CASE : str = BoyerMooreSearch(text, pattern)
SCREAMING_SNAKE_CASE : Tuple = bms.bad_character_heuristic()
if len(positions) == 0:
print('''No match found''')
else:
print('''Pattern found in following positions: ''')
print(positions)
| 317
| 1
|
"""simple docstring"""
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def __UpperCAmelCase ( snake_case_ : Union[str, Any] ) -> Dict:
"""simple docstring"""
return getitem, k
def __UpperCAmelCase ( snake_case_ : Dict , snake_case_ : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return setitem, k, v
def __UpperCAmelCase ( snake_case_ : str ) -> Optional[int]:
"""simple docstring"""
return delitem, k
def __UpperCAmelCase ( snake_case_ : Optional[Any] , snake_case_ : Tuple , *snake_case_ : Tuple ) -> str:
"""simple docstring"""
try:
return fun(snake_case_ , *snake_case_ ), None
except Exception as e:
return None, e
SCREAMING_SNAKE_CASE : int = (
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
)
SCREAMING_SNAKE_CASE : List[Any] = [
_set('''key_a''', '''val_a'''),
_set('''key_a''', '''val_b'''),
]
SCREAMING_SNAKE_CASE : Any = [
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
_del('''key_a'''),
_del('''key_b'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
]
SCREAMING_SNAKE_CASE : Union[str, Any] = [
_get('''key_a'''),
_del('''key_a'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
_del('''key_a'''),
_get('''key_a'''),
]
SCREAMING_SNAKE_CASE : Optional[Any] = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
SCREAMING_SNAKE_CASE : Optional[int] = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('''key_a''', '''val_b'''),
]
@pytest.mark.parametrize(
"""operations""" , (
pytest.param(_add_items , id="""add items""" ),
pytest.param(_overwrite_items , id="""overwrite items""" ),
pytest.param(_delete_items , id="""delete items""" ),
pytest.param(_access_absent_items , id="""access absent items""" ),
pytest.param(_add_with_resize_up , id="""add with resize up""" ),
pytest.param(_add_with_resize_down , id="""add with resize down""" ),
) , )
def __UpperCAmelCase ( snake_case_ : List[Any] ) -> Tuple:
"""simple docstring"""
_lowerCAmelCase = HashMap(initial_block_size=4 )
_lowerCAmelCase = {}
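    # Replay the same operation sequence against our HashMap and a plain dict,
    # then assert that both containers agree on results, repr, keys, and length.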
for _, (fun, *args) in enumerate(snake_case_ ):
_lowerCAmelCase , _lowerCAmelCase = _run_operation(snake_case_ , snake_case_ , *snake_case_ )
_lowerCAmelCase , _lowerCAmelCase = _run_operation(snake_case_ , snake_case_ , *snake_case_ )
assert my_res == py_res
assert str(snake_case_ ) == str(snake_case_ )
assert set(snake_case_ ) == set(snake_case_ )
assert len(snake_case_ ) == len(snake_case_ )
assert set(my.items() ) == set(py.items() )
def __UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
def is_public(snake_case_ : str ) -> bool:
return not name.startswith("""_""" )
_lowerCAmelCase = {name for name in dir({} ) if is_public(snake_case_ )}
_lowerCAmelCase = {name for name in dir(HashMap() ) if is_public(snake_case_ )}
assert dict_public_names > hash_public_names
| 317
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
SCREAMING_SNAKE_CASE : List[str] = False
class __lowerCamelCase ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = VersatileDiffusionImageVariationPipeline.from_pretrained("""shi-labs/versatile-diffusion""" )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
_lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
_lowerCAmelCase = torch.manual_seed(0 )
_lowerCAmelCase = pipe(
image=lowerCamelCase , generator=lowerCamelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
_lowerCAmelCase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 317
| 1
|
"""simple docstring"""
from __future__ import annotations
def __UpperCAmelCase ( snake_case_ : list[int] ) -> bool:
"""simple docstring"""
return len(set(snake_case_ ) ) == len(snake_case_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 317
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __lowerCamelCase ( __lowercase , unittest.TestCase ):
__UpperCamelCase = DiTPipeline
__UpperCamelCase = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
__UpperCamelCase = PipelineTesterMixin.required_optional_params - {
'latents',
'num_images_per_prompt',
'callback',
'callback_steps',
}
__UpperCamelCase = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
__UpperCamelCase = False
def A__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=lowerCamelCase , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=1_000 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=lowerCamelCase , )
_lowerCAmelCase = AutoencoderKL()
_lowerCAmelCase = DDIMScheduler()
_lowerCAmelCase = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler}
return components
def A__ (self , lowerCamelCase , lowerCamelCase=0 ):
'''simple docstring'''
if str(lowerCamelCase ).startswith("""mps""" ):
_lowerCAmelCase = torch.manual_seed(lowerCamelCase )
else:
_lowerCAmelCase = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
_lowerCAmelCase = {
"""class_labels""": [1],
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = """cpu"""
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = self.pipeline_class(**lowerCamelCase )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
_lowerCAmelCase = self.get_dummy_inputs(lowerCamelCase )
_lowerCAmelCase = pipe(**lowerCamelCase ).images
_lowerCAmelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
_lowerCAmelCase = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
_lowerCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCamelCase , 1e-3 )
def A__ (self ):
'''simple docstring'''
self._test_inference_batch_single_identical(relax_max_difference=lowerCamelCase , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def A__ (self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class __lowerCamelCase ( unittest.TestCase ):
def A__ (self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = torch.manual_seed(0 )
_lowerCAmelCase = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" )
pipe.to("""cuda""" )
_lowerCAmelCase = ["""vase""", """umbrella""", """white shark""", """white wolf"""]
_lowerCAmelCase = pipe.get_label_ids(lowerCamelCase )
_lowerCAmelCase = pipe(lowerCamelCase , generator=lowerCamelCase , num_inference_steps=40 , output_type="""np""" ).images
for word, image in zip(lowerCamelCase , lowerCamelCase ):
_lowerCAmelCase = load_numpy(
f"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" )
assert np.abs((expected_image - image).max() ) < 1e-2
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" )
_lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("""cuda""" )
_lowerCAmelCase = ["""vase""", """umbrella"""]
_lowerCAmelCase = pipe.get_label_ids(lowerCamelCase )
_lowerCAmelCase = torch.manual_seed(0 )
_lowerCAmelCase = pipe(lowerCamelCase , generator=lowerCamelCase , num_inference_steps=25 , output_type="""np""" ).images
for word, image in zip(lowerCamelCase , lowerCamelCase ):
_lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
f"""/dit/{word}_512.npy""" )
assert np.abs((expected_image - image).max() ) < 1e-1
| 317
| 1
|
"""simple docstring"""
from math import isqrt
def __UpperCAmelCase ( snake_case_ : int ) -> list[int]:
"""simple docstring"""
_lowerCAmelCase = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , snake_case_ , snake_case_ ):
_lowerCAmelCase = False
return [i for i in range(2 , snake_case_ ) if is_prime[i]]
def __UpperCAmelCase ( snake_case_ : int = 10**8 ) -> int:
"""simple docstring"""
_lowerCAmelCase = calculate_prime_numbers(max_number // 2 )
_lowerCAmelCase = 0
_lowerCAmelCase = 0
_lowerCAmelCase = len(snake_case_ ) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
if __name__ == "__main__":
print(F'{solution() = }')
| 317
|
"""simple docstring"""
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def __UpperCAmelCase ( snake_case_ : Union[str, Any] ) -> Dict:
"""simple docstring"""
return getitem, k
def __UpperCAmelCase ( snake_case_ : Dict , snake_case_ : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return setitem, k, v
def __UpperCAmelCase ( snake_case_ : str ) -> Optional[int]:
"""simple docstring"""
return delitem, k
def __UpperCAmelCase ( snake_case_ : Optional[Any] , snake_case_ : Tuple , *snake_case_ : Tuple ) -> str:
"""simple docstring"""
try:
return fun(snake_case_ , *snake_case_ ), None
except Exception as e:
return None, e
SCREAMING_SNAKE_CASE : int = (
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
)
SCREAMING_SNAKE_CASE : List[Any] = [
_set('''key_a''', '''val_a'''),
_set('''key_a''', '''val_b'''),
]
SCREAMING_SNAKE_CASE : Any = [
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
_del('''key_a'''),
_del('''key_b'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
]
SCREAMING_SNAKE_CASE : Union[str, Any] = [
_get('''key_a'''),
_del('''key_a'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
_del('''key_a'''),
_get('''key_a'''),
]
SCREAMING_SNAKE_CASE : Optional[Any] = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
SCREAMING_SNAKE_CASE : Optional[int] = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('''key_a''', '''val_b'''),
]
@pytest.mark.parametrize(
"""operations""" , (
pytest.param(_add_items , id="""add items""" ),
pytest.param(_overwrite_items , id="""overwrite items""" ),
pytest.param(_delete_items , id="""delete items""" ),
pytest.param(_access_absent_items , id="""access absent items""" ),
pytest.param(_add_with_resize_up , id="""add with resize up""" ),
pytest.param(_add_with_resize_down , id="""add with resize down""" ),
) , )
def __UpperCAmelCase ( snake_case_ : List[Any] ) -> Tuple:
"""simple docstring"""
_lowerCAmelCase = HashMap(initial_block_size=4 )
_lowerCAmelCase = {}
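    # Mirror every operation on the HashMap and on a built-in dict; the asserts
    # below require identical results, string form, key set, and size.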
for _, (fun, *args) in enumerate(snake_case_ ):
_lowerCAmelCase , _lowerCAmelCase = _run_operation(snake_case_ , snake_case_ , *snake_case_ )
_lowerCAmelCase , _lowerCAmelCase = _run_operation(snake_case_ , snake_case_ , *snake_case_ )
assert my_res == py_res
assert str(snake_case_ ) == str(snake_case_ )
assert set(snake_case_ ) == set(snake_case_ )
assert len(snake_case_ ) == len(snake_case_ )
assert set(my.items() ) == set(py.items() )
def __UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
def is_public(snake_case_ : str ) -> bool:
return not name.startswith("""_""" )
_lowerCAmelCase = {name for name in dir({} ) if is_public(snake_case_ )}
_lowerCAmelCase = {name for name in dir(HashMap() ) if is_public(snake_case_ )}
assert dict_public_names > hash_public_names
| 317
| 1
|
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class __lowerCamelCase ( __lowercase ):
def __init__(self , lowerCamelCase = "▁" , lowerCamelCase = True , lowerCamelCase = "<unk>" , lowerCamelCase = "</s>" , lowerCamelCase = "<pad>" , ):
'''simple docstring'''
_lowerCAmelCase = {
"""pad""": {"""id""": 0, """token""": pad_token},
"""eos""": {"""id""": 1, """token""": eos_token},
"""unk""": {"""id""": 2, """token""": unk_token},
}
_lowerCAmelCase = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
_lowerCAmelCase = token_dict["""token"""]
_lowerCAmelCase = Tokenizer(Unigram() )
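        # Normalization pipeline, applied in order before pre-tokenization:
        # NMT cleanup, NFKC folding, whitespace collapsing, and lowercasing.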
_lowerCAmelCase = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(""" {2,}""" ) , """ """ ),
normalizers.Lowercase(),
] )
_lowerCAmelCase = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=lowerCamelCase , add_prefix_space=lowerCamelCase ),
pre_tokenizers.Digits(individual_digits=lowerCamelCase ),
pre_tokenizers.Punctuation(),
] )
_lowerCAmelCase = decoders.Metaspace(replacement=lowerCamelCase , add_prefix_space=lowerCamelCase )
_lowerCAmelCase = TemplateProcessing(
single=f"""$A {self.special_tokens["eos"]["token"]}""" , special_tokens=[(self.special_tokens["""eos"""]["""token"""], self.special_tokens["""eos"""]["""id"""])] , )
_lowerCAmelCase = {
"""model""": """SentencePieceUnigram""",
"""replacement""": replacement,
"""add_prefix_space""": add_prefix_space,
}
super().__init__(lowerCamelCase , lowerCamelCase )
def A__ (self , lowerCamelCase , lowerCamelCase = 8_000 , lowerCamelCase = True , ):
'''simple docstring'''
_lowerCAmelCase = trainers.UnigramTrainer(
vocab_size=lowerCamelCase , special_tokens=self.special_tokens_list , show_progress=lowerCamelCase , )
if isinstance(lowerCamelCase , lowerCamelCase ):
_lowerCAmelCase = [files]
self._tokenizer.train(lowerCamelCase , trainer=lowerCamelCase )
self.add_unk_id()
def A__ (self , lowerCamelCase , lowerCamelCase = 8_000 , lowerCamelCase = True , ):
'''simple docstring'''
_lowerCAmelCase = trainers.UnigramTrainer(
vocab_size=lowerCamelCase , special_tokens=self.special_tokens_list , show_progress=lowerCamelCase , )
self._tokenizer.train_from_iterator(lowerCamelCase , trainer=lowerCamelCase )
self.add_unk_id()
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = json.loads(self._tokenizer.to_str() )
_lowerCAmelCase = self.special_tokens["""unk"""]["""id"""]
_lowerCAmelCase = Tokenizer.from_str(json.dumps(lowerCamelCase ) )
| 317
|
"""simple docstring"""
def __UpperCAmelCase ( snake_case_ : int , snake_case_ : list[int] , snake_case_ : int ) -> int:
"""simple docstring"""
def count_of_possible_combinations(snake_case_ : int ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(snake_case_ )
def __UpperCAmelCase ( snake_case_ : int , snake_case_ : list[int] , snake_case_ : int ) -> int:
"""simple docstring"""
def count_of_possible_combinations_with_dp_array(
snake_case_ : int , snake_case_ : list[int] ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
_lowerCAmelCase = sum(
count_of_possible_combinations_with_dp_array(target - item , snake_case_ )
for item in array )
_lowerCAmelCase = answer
return answer
_lowerCAmelCase = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(snake_case_ , snake_case_ )
def __UpperCAmelCase ( snake_case_ : int , snake_case_ : list[int] , snake_case_ : int ) -> int:
"""simple docstring"""
_lowerCAmelCase = [0] * (target + 1)
_lowerCAmelCase = 1
for i in range(1 , target + 1 ):
for j in range(snake_case_ ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE : Tuple = 3
SCREAMING_SNAKE_CASE : Any = 5
SCREAMING_SNAKE_CASE : Optional[int] = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 317
| 1
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
SCREAMING_SNAKE_CASE : Union[str, Any] = TypeVar('''T''')
class __lowerCamelCase ( Generic[T] ):
def __init__(self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = data
_lowerCAmelCase = None
def __str__(self ):
'''simple docstring'''
return f"""{self.data}"""
class __lowerCamelCase ( Generic[T] ):
def __init__(self ):
'''simple docstring'''
_lowerCAmelCase = None
def __iter__(self ):
'''simple docstring'''
_lowerCAmelCase = self.top
while node:
yield node.data
_lowerCAmelCase = node.next
def __str__(self ):
'''simple docstring'''
return "->".join([str(lowerCamelCase ) for item in self] )
def __len__(self ):
'''simple docstring'''
return len(tuple(iter(self ) ) )
def A__ (self ):
'''simple docstring'''
return self.top is None
def A__ (self , lowerCamelCase ):
'''simple docstring'''
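        # Push: link the new node in front of the current top, then make it the top.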
_lowerCAmelCase = Node(lowerCamelCase )
if not self.is_empty():
_lowerCAmelCase = self.top
_lowerCAmelCase = node
def A__ (self ):
'''simple docstring'''
if self.is_empty():
raise IndexError("""pop from empty stack""" )
assert isinstance(self.top , lowerCamelCase )
_lowerCAmelCase = self.top
_lowerCAmelCase = self.top.next
return pop_node.data
def A__ (self ):
'''simple docstring'''
if self.is_empty():
raise IndexError("""peek from empty stack""" )
assert self.top is not None
return self.top.data
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = None
if __name__ == "__main__":
from doctest import testmod
testmod()
| 317
|
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
SCREAMING_SNAKE_CASE : str = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
SCREAMING_SNAKE_CASE : list[int] = [ord(letter) for letter in string.ascii_lowercase]
SCREAMING_SNAKE_CASE : set[int] = {ord(char) for char in VALID_CHARS}
SCREAMING_SNAKE_CASE : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def __UpperCAmelCase ( snake_case_ : list[int] , snake_case_ : tuple[int, ...] ) -> str | None:
"""simple docstring"""
_lowerCAmelCase = ""
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = 42
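    # XOR each ciphertext byte against the cycling 3-letter key; give up on
    # this key as soon as a decoded byte falls outside the printable whitelist.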
for keychar, cipherchar in zip(cycle(snake_case_ ) , snake_case_ ):
_lowerCAmelCase = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(snake_case_ )
return decoded
def __UpperCAmelCase ( snake_case_ : list[int] ) -> list[str]:
"""simple docstring"""
_lowerCAmelCase = []
for key in product(snake_case_ , repeat=3 ):
_lowerCAmelCase = try_key(snake_case_ , snake_case_ )
if encoded is not None:
possibles.append(snake_case_ )
return possibles
def __UpperCAmelCase ( snake_case_ : list[str] , snake_case_ : str ) -> list[str]:
"""simple docstring"""
return [possible for possible in possibles if common_word in possible.lower()]
def __UpperCAmelCase ( snake_case_ : str = "p059_cipher.txt" ) -> int:
"""simple docstring"""
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = Path(snake_case_ ).parent.joinpath(snake_case_ ).read_text(encoding="""utf-8""" )
_lowerCAmelCase = [int(snake_case_ ) for number in data.strip().split(""",""" )]
_lowerCAmelCase = filter_valid_chars(snake_case_ )
for common_word in COMMON_WORDS:
_lowerCAmelCase = filter_common_word(snake_case_ , snake_case_ )
if len(snake_case_ ) == 1:
break
_lowerCAmelCase = possibles[0]
return sum(ord(snake_case_ ) for char in decoded_text )
if __name__ == "__main__":
print(F'{solution() = }')
| 317
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE : Any = {
'''configuration_speecht5''': [
'''SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP''',
'''SpeechT5Config''',
'''SpeechT5HifiGanConfig''',
],
'''feature_extraction_speecht5''': ['''SpeechT5FeatureExtractor'''],
'''processing_speecht5''': ['''SpeechT5Processor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : List[str] = ['''SpeechT5Tokenizer''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Any = [
'''SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SpeechT5ForSpeechToText''',
'''SpeechT5ForSpeechToSpeech''',
'''SpeechT5ForTextToSpeech''',
'''SpeechT5Model''',
'''SpeechT5PreTrainedModel''',
'''SpeechT5HifiGan''',
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 317
|
"""simple docstring"""
def __UpperCAmelCase ( snake_case_ : int = 1000000 ) -> int:
"""simple docstring"""
_lowerCAmelCase = limit + 1
_lowerCAmelCase = [0] * limit
for first_term in range(1 , snake_case_ ):
for n in range(snake_case_ , snake_case_ , snake_case_ ):
_lowerCAmelCase = first_term + n / first_term
            if common_difference % 4: # d must be divisible by 4
continue
else:
common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ): # since x, y, z are positive integers
                    frequency[n] += 1 # so z > 0 and a > d, also 4d < a
_lowerCAmelCase = sum(1 for x in frequency[1:limit] if x == 10 )
return count
if __name__ == "__main__":
print(F'{solution() = }')
| 317
| 1
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=__lowercase )
class __lowerCamelCase ( __lowercase ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
__UpperCamelCase = field(default='question-answering-extractive' , metadata={'include_in_asdict_even_if_is_default': True} )
__UpperCamelCase = Features({'question': Value('string' ), 'context': Value('string' )} )
__UpperCamelCase = Features(
{
'answers': Sequence(
{
'text': Value('string' ),
'answer_start': Value('int32' ),
} )
} )
__UpperCamelCase = "question"
__UpperCamelCase = "context"
__UpperCamelCase = "answers"
@property
def A__ (self ):
'''simple docstring'''
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
| 317
|
"""simple docstring"""
from functools import reduce
SCREAMING_SNAKE_CASE : int = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def __UpperCAmelCase ( snake_case_ : str = N ) -> int:
"""simple docstring"""
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda snake_case_ , snake_case_ : str(int(snake_case_ ) * int(snake_case_ ) ) , n[i : i + 13] ) )
for i in range(len(snake_case_ ) - 12 ) )
if __name__ == "__main__":
print(F'{solution() = }')
| 317
| 1
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = ['image_processor', 'feature_extractor']
__UpperCamelCase = 'TvltImageProcessor'
__UpperCamelCase = 'TvltFeatureExtractor'
def __init__(self , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
super().__init__(image_processor=lowerCamelCase , feature_extractor=lowerCamelCase )
_lowerCAmelCase = image_processor
_lowerCAmelCase = feature_extractor
def __call__(self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=False , lowerCamelCase=False , *lowerCamelCase , **lowerCamelCase , ):
'''simple docstring'''
if images is None and audio is None:
raise ValueError("""You need to specify either an `images` or `audio` input to process.""" )
_lowerCAmelCase = None
if images is not None:
_lowerCAmelCase = self.image_processor(lowerCamelCase , mask_pixel=lowerCamelCase , *lowerCamelCase , **lowerCamelCase )
if images_mixed is not None:
_lowerCAmelCase = self.image_processor(lowerCamelCase , is_mixed=lowerCamelCase , *lowerCamelCase , **lowerCamelCase )
if audio is not None:
_lowerCAmelCase = self.feature_extractor(
lowerCamelCase , *lowerCamelCase , sampling_rate=lowerCamelCase , mask_audio=lowerCamelCase , **lowerCamelCase )
_lowerCAmelCase = {}
if audio is not None:
output_dict.update(lowerCamelCase )
if images is not None:
output_dict.update(lowerCamelCase )
if images_mixed_dict is not None:
output_dict.update(lowerCamelCase )
return output_dict
@property
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.image_processor.model_input_names
_lowerCAmelCase = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
| 317
|
"""simple docstring"""
def __UpperCAmelCase ( snake_case_ : int = 600851475143 ) -> int:
"""simple docstring"""
try:
_lowerCAmelCase = int(snake_case_ )
except (TypeError, ValueError):
raise TypeError("""Parameter n must be int or castable to int.""" )
if n <= 0:
raise ValueError("""Parameter n must be greater than or equal to one.""" )
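    # Trial division: strip each factor i from n completely; whatever remains
    # above 1 at the end is itself prime and is the largest prime factor.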
_lowerCAmelCase = 1
_lowerCAmelCase = 2
while i * i <= n:
while n % i == 0:
_lowerCAmelCase = i
n //= i
i += 1
if n > 1:
_lowerCAmelCase = n
return int(snake_case_ )
if __name__ == "__main__":
print(F'{solution() = }')
| 317
| 1
|
"""simple docstring"""
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
SCREAMING_SNAKE_CASE : str = get_tests_dir() + '''/test_data/fsmt/fsmt_val_data.json'''
with io.open(filename, '''r''', encoding='''utf-8''') as f:
SCREAMING_SNAKE_CASE : Tuple = json.load(f)
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
def A__ (self , lowerCamelCase ):
'''simple docstring'''
return FSMTTokenizer.from_pretrained(lowerCamelCase )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = FSMTForConditionalGeneration.from_pretrained(lowerCamelCase ).to(lowerCamelCase )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
["""en-ru""", 26.0],
["""ru-en""", 22.0],
["""en-de""", 22.0],
["""de-en""", 29.0],
] )
@slow
def A__ (self , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = f"""facebook/wmt19-{pair}"""
_lowerCAmelCase = self.get_tokenizer(lowerCamelCase )
_lowerCAmelCase = self.get_model(lowerCamelCase )
_lowerCAmelCase = bleu_data[pair]["""src"""]
_lowerCAmelCase = bleu_data[pair]["""tgt"""]
_lowerCAmelCase = tokenizer(lowerCamelCase , return_tensors="""pt""" , truncation=lowerCamelCase , padding="""longest""" ).to(lowerCamelCase )
_lowerCAmelCase = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
_lowerCAmelCase = tokenizer.batch_decode(
lowerCamelCase , skip_special_tokens=lowerCamelCase , clean_up_tokenization_spaces=lowerCamelCase )
_lowerCAmelCase = calculate_bleu(lowerCamelCase , lowerCamelCase )
print(lowerCamelCase )
self.assertGreaterEqual(scores["""bleu"""] , lowerCamelCase )
| 317
|
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
SCREAMING_SNAKE_CASE : Optional[Any] = logging.getLogger(__name__)
@dataclass
class __lowerCamelCase :
__UpperCamelCase = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
__UpperCamelCase = field(
default=__lowercase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__UpperCamelCase = field(
default=__lowercase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
__UpperCamelCase = field(
default=__lowercase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    __UpperCamelCase = field(default=__lowercase , metadata={'help': 'Whether to freeze the encoder.'} )
__UpperCamelCase = field(default=__lowercase , metadata={'help': 'Whether to freeze the embeddings.'} )
@dataclass
class __lowerCamelCase :
__UpperCamelCase = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
__UpperCamelCase = field(
default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , )
__UpperCamelCase = field(
default=1_024 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__UpperCamelCase = field(
default=128 , metadata={
'help': (
'The maximum total sequence length for target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__UpperCamelCase = field(
default=142 , metadata={
'help': (
'The maximum total sequence length for validation target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded. '
'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
'during ``evaluate`` and ``predict``.'
)
} , )
__UpperCamelCase = field(
default=142 , metadata={
'help': (
'The maximum total sequence length for test target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__UpperCamelCase = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} )
__UpperCamelCase = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} )
__UpperCamelCase = field(default=-1 , metadata={'help': '# test examples. -1 means use all.'} )
__UpperCamelCase = field(default=__lowercase , metadata={'help': 'Source language id for translation.'} )
__UpperCamelCase = field(default=__lowercase , metadata={'help': 'Target language id for translation.'} )
__UpperCamelCase = field(default=__lowercase , metadata={'help': '# num_beams to use for evaluation.'} )
__UpperCamelCase = field(
default=__lowercase , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , )
def __UpperCAmelCase ( snake_case_ : Optional[int] , snake_case_ : Any , snake_case_ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
logger.info(F"""***** {split} metrics *****""" )
for key in sorted(metrics.keys() ):
logger.info(F""" {key} = {metrics[key]}""" )
save_json(snake_case_ , os.path.join(snake_case_ , F"""{split}_results.json""" ) )
def __UpperCAmelCase ( ) -> Union[str, Any]:
"""simple docstring"""
_lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = parser.parse_args_into_dataclasses()
check_output_dir(snake_case_ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("""Training/evaluation parameters %s""" , snake_case_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowerCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
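    # Copy selected regularization knobs from the training arguments onto the
    # model config, provided the config actually exposes that attribute.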
_lowerCAmelCase = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""")
for p in extra_model_params:
if getattr(snake_case_ , snake_case_ , snake_case_ ):
assert hasattr(snake_case_ , snake_case_ ), F"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
setattr(snake_case_ , snake_case_ , getattr(snake_case_ , snake_case_ ) )
_lowerCAmelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf=""".ckpt""" in model_args.model_name_or_path , config=snake_case_ , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(snake_case_ , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
_lowerCAmelCase = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(snake_case_ , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(snake_case_ , snake_case_ ):
_lowerCAmelCase = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
_lowerCAmelCase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(snake_case_ )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
_lowerCAmelCase = SeqaSeqDataset
# Get datasets
_lowerCAmelCase = (
dataset_class(
snake_case_ , type_path="""train""" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_train
else None
)
_lowerCAmelCase = (
dataset_class(
snake_case_ , type_path="""val""" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
_lowerCAmelCase = (
dataset_class(
snake_case_ , type_path="""test""" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
_lowerCAmelCase = (
build_compute_metrics_fn(data_args.task , snake_case_ ) if training_args.predict_with_generate else None
)
_lowerCAmelCase = SeqaSeqTrainer(
model=snake_case_ , args=snake_case_ , data_args=snake_case_ , train_dataset=snake_case_ , eval_dataset=snake_case_ , data_collator=SeqaSeqDataCollator(
snake_case_ , snake_case_ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=snake_case_ , tokenizer=snake_case_ , )
_lowerCAmelCase = {}
# Training
if training_args.do_train:
logger.info("""*** Train ***""" )
_lowerCAmelCase = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
_lowerCAmelCase = train_result.metrics
_lowerCAmelCase = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("""train""" , snake_case_ , training_args.output_dir )
all_metrics.update(snake_case_ )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
_lowerCAmelCase = trainer.evaluate(metric_key_prefix="""val""" )
_lowerCAmelCase = data_args.n_val
_lowerCAmelCase = round(metrics["""val_loss"""] , 4 )
if trainer.is_world_process_zero():
handle_metrics("""val""" , snake_case_ , training_args.output_dir )
all_metrics.update(snake_case_ )
if training_args.do_predict:
logger.info("""*** Predict ***""" )
_lowerCAmelCase = trainer.predict(test_dataset=snake_case_ , metric_key_prefix="""test""" )
_lowerCAmelCase = test_output.metrics
_lowerCAmelCase = data_args.n_test
if trainer.is_world_process_zero():
_lowerCAmelCase = round(metrics["""test_loss"""] , 4 )
handle_metrics("""test""" , snake_case_ , training_args.output_dir )
all_metrics.update(snake_case_ )
if training_args.predict_with_generate:
_lowerCAmelCase = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=snake_case_ , clean_up_tokenization_spaces=snake_case_ )
_lowerCAmelCase = lmap(str.strip , snake_case_ )
write_txt_file(snake_case_ , os.path.join(training_args.output_dir , """test_generations.txt""" ) )
if trainer.is_world_process_zero():
save_json(snake_case_ , os.path.join(training_args.output_dir , """all_results.json""" ) )
return all_metrics
def __UpperCAmelCase ( snake_case_ : Any ) -> Dict:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 317
| 1
|
"""simple docstring"""
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def __UpperCAmelCase ( snake_case_ : int , snake_case_ : bool = True , snake_case_ : float = math.inf , snake_case_ : float = -math.inf , snake_case_ : float = math.inf , snake_case_ : float = -math.inf , snake_case_ : bool = False , snake_case_ : float = 100 , snake_case_ : float = 0.0_1 , snake_case_ : float = 1 , ) -> Any:
"""simple docstring"""
_lowerCAmelCase = False
_lowerCAmelCase = search_prob
_lowerCAmelCase = start_temperate
_lowerCAmelCase = []
_lowerCAmelCase = 0
_lowerCAmelCase = None
while not search_end:
_lowerCAmelCase = current_state.score()
if best_state is None or current_score > best_state.score():
_lowerCAmelCase = current_state
scores.append(snake_case_ )
iterations += 1
_lowerCAmelCase = None
_lowerCAmelCase = current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
_lowerCAmelCase = random.randint(0 , len(snake_case_ ) - 1 ) # picking a random neighbor
_lowerCAmelCase = neighbors.pop(snake_case_ )
_lowerCAmelCase = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
_lowerCAmelCase = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
_lowerCAmelCase = picked_neighbor
else:
_lowerCAmelCase = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
_lowerCAmelCase = picked_neighbor
_lowerCAmelCase = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
_lowerCAmelCase = True
else:
_lowerCAmelCase = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(snake_case_ ) , snake_case_ )
plt.xlabel("""Iterations""" )
plt.ylabel("""Function values""" )
plt.show()
return best_state
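# A minimal sketch of the Metropolis acceptance rule used in the loop above;
# `delta` and `temp` are hypothetical stand-ins for the `change` and
# `current_temp` values computed inside simulated_annealing.
def metropolis_accepts(delta: float, temp: float) -> bool:
    # a better neighbor (delta > 0) is always taken; a worse one is taken
    # with probability e^(delta / temp), which shrinks as the system cools
    return delta > 0 or random.random() < math.e ** (delta / temp)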
if __name__ == "__main__":
def __UpperCAmelCase ( snake_case_ : Tuple , snake_case_ : Tuple ) -> List[Any]:
"""simple docstring"""
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
SCREAMING_SNAKE_CASE : Any = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa)
SCREAMING_SNAKE_CASE : Union[str, Any] = simulated_annealing(
prob, find_max=False, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True
)
print(
'''The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
        F'and 50 > y > - 5 found via simulated annealing: {local_min.score()}'
)
# starting the problem with initial coordinates (12, 47)
SCREAMING_SNAKE_CASE : Dict = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa)
SCREAMING_SNAKE_CASE : Tuple = simulated_annealing(
prob, find_max=True, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True
)
print(
'''The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
        F'and 50 > y > - 5 found via simulated annealing: {local_min.score()}'
)
def __UpperCAmelCase ( snake_case_ : List[Any] , snake_case_ : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return (3 * x**2) - (6 * y)
SCREAMING_SNAKE_CASE : Union[str, Any] = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
SCREAMING_SNAKE_CASE : Dict = simulated_annealing(prob, find_max=False, visualization=True)
print(
        '''The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '''
F'{local_min.score()}'
)
SCREAMING_SNAKE_CASE : Dict = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
SCREAMING_SNAKE_CASE : List[Any] = simulated_annealing(prob, find_max=True, visualization=True)
print(
        '''The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '''
F'{local_min.score()}'
)
| 317
|
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE : List[Any] = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 317
| 1
|
"""simple docstring"""
def __UpperCAmelCase ( snake_case_ : Any , snake_case_ : Tuple ) -> Optional[Any]:
"""simple docstring"""
_lowerCAmelCase = [0 for i in range(r + 1 )]
# nc0 = 1
_lowerCAmelCase = 1
for i in range(1 , n + 1 ):
        # compute the current row from the previous row
_lowerCAmelCase = min(snake_case_ , snake_case_ )
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
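# The loop above fills a single row of Pascal's triangle in place, using the
# identity C(i, j) = C(i-1, j) + C(i-1, j-1); iterating j downward ensures
# c[j - 1] still holds the previous row's value when it is read. As a quick
# sanity check, C(10, 5) = 252 and C(10, 0) = 1.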
print(binomial_coefficient(n=1_0, r=5))
| 317
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __lowerCamelCase ( unittest.TestCase ):
def __init__(self , lowerCamelCase , lowerCamelCase=7 , lowerCamelCase=3 , lowerCamelCase=18 , lowerCamelCase=30 , lowerCamelCase=400 , lowerCamelCase=True , lowerCamelCase=None , lowerCamelCase=True , lowerCamelCase=None , ):
'''simple docstring'''
_lowerCAmelCase = size if size is not None else {"""shortest_edge""": 20}
_lowerCAmelCase = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = num_channels
_lowerCAmelCase = image_size
_lowerCAmelCase = min_resolution
_lowerCAmelCase = max_resolution
_lowerCAmelCase = do_resize
_lowerCAmelCase = size
_lowerCAmelCase = do_center_crop
_lowerCAmelCase = crop_size
def A__ (self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __lowerCamelCase ( __lowercase , unittest.TestCase ):
__UpperCamelCase = MobileNetVaImageProcessor if is_vision_available() else None
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = MobileNetVaImageProcessingTester(self )
@property
def A__ (self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase , """size""" ) )
self.assertTrue(hasattr(lowerCamelCase , """do_center_crop""" ) )
self.assertTrue(hasattr(lowerCamelCase , """crop_size""" ) )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 20} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
_lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def A__ (self ):
'''simple docstring'''
pass
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , Image.Image )
# Test not batched input
_lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_lowerCAmelCase = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , np.ndarray )
# Test not batched input
_lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_lowerCAmelCase = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , torch.Tensor )
# Test not batched input
_lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_lowerCAmelCase = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 317
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Any = {
'''microsoft/swin-tiny-patch4-window7-224''': (
'''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class __lowerCamelCase ( __lowercase , __lowercase ):
__UpperCamelCase = 'swin'
__UpperCamelCase = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__(self , lowerCamelCase=224 , lowerCamelCase=4 , lowerCamelCase=3 , lowerCamelCase=96 , lowerCamelCase=[2, 2, 6, 2] , lowerCamelCase=[3, 6, 12, 24] , lowerCamelCase=7 , lowerCamelCase=4.0 , lowerCamelCase=True , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=0.1 , lowerCamelCase="gelu" , lowerCamelCase=False , lowerCamelCase=0.02 , lowerCamelCase=1e-5 , lowerCamelCase=32 , lowerCamelCase=None , lowerCamelCase=None , **lowerCamelCase , ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
_lowerCAmelCase = image_size
_lowerCAmelCase = patch_size
_lowerCAmelCase = num_channels
_lowerCAmelCase = embed_dim
_lowerCAmelCase = depths
_lowerCAmelCase = len(lowerCamelCase )
_lowerCAmelCase = num_heads
_lowerCAmelCase = window_size
_lowerCAmelCase = mlp_ratio
_lowerCAmelCase = qkv_bias
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = drop_path_rate
_lowerCAmelCase = hidden_act
_lowerCAmelCase = use_absolute_embeddings
_lowerCAmelCase = layer_norm_eps
_lowerCAmelCase = initializer_range
_lowerCAmelCase = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_lowerCAmelCase = int(embed_dim * 2 ** (len(lowerCamelCase ) - 1) )
_lowerCAmelCase = ["""stem"""] + [f"""stage{idx}""" for idx in range(1 , len(lowerCamelCase ) + 1 )]
_lowerCAmelCase , _lowerCAmelCase = get_aligned_output_features_output_indices(
out_features=lowerCamelCase , out_indices=lowerCamelCase , stage_names=self.stage_names )
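# With the defaults above (embed_dim=96 and depths=[2, 2, 6, 2], i.e. four
# stages), the derived hidden_size is 96 * 2**3 = 768, the channel count
# after the final Swin stage.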
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = version.parse('1.11' )
@property
def A__ (self ):
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def A__ (self ):
'''simple docstring'''
return 1e-4
| 317
|
"""simple docstring"""
def __UpperCAmelCase ( snake_case_ : list ) -> list:
"""simple docstring"""
for i in range(len(snake_case_ ) - 1 , 0 , -1 ):
_lowerCAmelCase = False
for j in range(snake_case_ , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
_lowerCAmelCase , _lowerCAmelCase = unsorted[j - 1], unsorted[j]
_lowerCAmelCase = True
for j in range(snake_case_ ):
if unsorted[j] > unsorted[j + 1]:
_lowerCAmelCase , _lowerCAmelCase = unsorted[j + 1], unsorted[j]
_lowerCAmelCase = True
if not swapped:
break
return unsorted
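# Each outer iteration makes two passes: a backward pass that bubbles small
# values toward the front, then a forward pass that bubbles large values
# toward the back, which is what gives the cocktail shaker sort its name. One
# full iteration over [4, 1, 3, 2] already yields [1, 2, 3, 4]; the next
# iteration makes no swaps, so the `swapped` flag ends the loop early.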
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE : List[Any] = input('''Enter numbers separated by a comma:\n''').strip()
SCREAMING_SNAKE_CASE : List[str] = [int(item) for item in user_input.split(''',''')]
print(F'{cocktail_shaker_sort(unsorted) = }')
| 317
| 1
|
"""simple docstring"""
class __lowerCamelCase :
def __init__(self , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None ):
'''simple docstring'''
_lowerCAmelCase = data
_lowerCAmelCase = previous
_lowerCAmelCase = next_node
def __str__(self ):
'''simple docstring'''
return f"""{self.data}"""
def A__ (self ):
'''simple docstring'''
return self.data
def A__ (self ):
'''simple docstring'''
return self.next
def A__ (self ):
'''simple docstring'''
return self.previous
class __lowerCamelCase :
def __init__(self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = head
def __iter__(self ):
'''simple docstring'''
return self
def A__ (self ):
'''simple docstring'''
if not self.current:
raise StopIteration
else:
_lowerCAmelCase = self.current.get_data()
_lowerCAmelCase = self.current.get_next()
return value
class __lowerCamelCase :
def __init__(self ):
'''simple docstring'''
_lowerCAmelCase = None # First node in list
_lowerCAmelCase = None # Last node in list
def __str__(self ):
'''simple docstring'''
_lowerCAmelCase = self.head
_lowerCAmelCase = []
while current is not None:
nodes.append(current.get_data() )
_lowerCAmelCase = current.get_next()
return " ".join(str(lowerCamelCase ) for node in nodes )
def __contains__(self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.head
while current:
if current.get_data() == value:
return True
_lowerCAmelCase = current.get_next()
return False
def __iter__(self ):
'''simple docstring'''
return LinkedListIterator(self.head )
def A__ (self ):
'''simple docstring'''
if self.head:
return self.head.get_data()
return None
def A__ (self ):
'''simple docstring'''
if self.tail:
return self.tail.get_data()
return None
def A__ (self , lowerCamelCase ):
'''simple docstring'''
if self.head is None:
_lowerCAmelCase = node
_lowerCAmelCase = node
else:
self.insert_before_node(self.head , lowerCamelCase )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
if self.head is None:
self.set_head(lowerCamelCase )
else:
self.insert_after_node(self.tail , lowerCamelCase )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = Node(lowerCamelCase )
if self.head is None:
self.set_head(lowerCamelCase )
else:
self.set_tail(lowerCamelCase )
def A__ (self , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = node
_lowerCAmelCase = node.previous
if node.get_previous() is None:
_lowerCAmelCase = node_to_insert
else:
_lowerCAmelCase = node_to_insert
_lowerCAmelCase = node_to_insert
def A__ (self , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = node
_lowerCAmelCase = node.next
if node.get_next() is None:
_lowerCAmelCase = node_to_insert
else:
_lowerCAmelCase = node_to_insert
_lowerCAmelCase = node_to_insert
def A__ (self , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = 1
_lowerCAmelCase = Node(lowerCamelCase )
_lowerCAmelCase = self.head
while node:
if current_position == position:
self.insert_before_node(lowerCamelCase , lowerCamelCase )
return
current_position += 1
_lowerCAmelCase = node.next
self.insert_after_node(self.tail , lowerCamelCase )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.head
while node:
if node.get_data() == item:
return node
_lowerCAmelCase = node.get_next()
raise Exception("""Node not found""" )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
if (node := self.get_node(lowerCamelCase )) is not None:
if node == self.head:
_lowerCAmelCase = self.head.get_next()
if node == self.tail:
_lowerCAmelCase = self.tail.get_previous()
self.remove_node_pointers(lowerCamelCase )
@staticmethod
def A__ (lowerCamelCase ):
'''simple docstring'''
if node.get_next():
_lowerCAmelCase = node.previous
if node.get_previous():
_lowerCAmelCase = node.next
_lowerCAmelCase = None
_lowerCAmelCase = None
def A__ (self ):
'''simple docstring'''
return self.head is None
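# A minimal usage sketch; the class and method names here are assumptions,
# since the identifiers above are placeholders (the iterator class is
# referenced as LinkedListIterator, suggesting this list is LinkedList):
#
#     linked_list = LinkedList()
#     linked_list.insert(1)      # first value becomes both head and tail
#     linked_list.insert(2)      # later values are appended after the tail
#     assert 2 in linked_list    # membership via __contains__
#     assert str(linked_list) == "1 2"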
def __UpperCAmelCase ( ) -> None:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 317
|
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
def __UpperCAmelCase ( snake_case_ : bool , snake_case_ : bool ) -> Tuple:
"""simple docstring"""
def run_func(snake_case_ : Union[str, Any] ):
@wraps(snake_case_ )
def run_in_eager_mode(*snake_case_ : Optional[int] , **snake_case_ : Union[str, Any] ):
return func(*snake_case_ , **snake_case_ )
@wraps(snake_case_ )
@tf.function(experimental_compile=snake_case_ )
def run_in_graph_mode(*snake_case_ : Dict , **snake_case_ : Union[str, Any] ):
return func(*snake_case_ , **snake_case_ )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
"""Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
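# A sketch of how this factory is applied (mirroring its use in the benchmark
# class below; `model` and `input_ids` are hypothetical closures):
#
#     @run_with_tf_optimizations(True, False)  # eager mode on, XLA off
#     def _forward():
#         return model(input_ids, training=False)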
def __UpperCAmelCase ( snake_case_ : int , snake_case_ : int , snake_case_ : int ) -> ["tf.Tensor"]:
"""simple docstring"""
_lowerCAmelCase = random.Random()
_lowerCAmelCase = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(snake_case_ , shape=(batch_size, sequence_length) , dtype=tf.intaa )
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = "TensorFlow"
@property
def A__ (self ):
'''simple docstring'''
return tf.__version__
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_lowerCAmelCase = self._prepare_inference_func(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return self._measure_speed(_inference )
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_lowerCAmelCase = self._prepare_train_func(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return self._measure_speed(_train )
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowerCamelCase )
_lowerCAmelCase = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_lowerCAmelCase = self._prepare_inference_func(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return self._measure_memory(_inference )
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowerCamelCase )
_lowerCAmelCase = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_lowerCAmelCase = self._prepare_train_func(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return self._measure_memory(_train )
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
_lowerCAmelCase = (
hasattr(lowerCamelCase , """architectures""" )
and isinstance(config.architectures , lowerCamelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_lowerCAmelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
_lowerCAmelCase = __import__("""transformers""" , fromlist=[model_class] )
_lowerCAmelCase = getattr(lowerCamelCase , lowerCamelCase )
_lowerCAmelCase = model_cls(lowerCamelCase )
except ImportError:
raise ImportError(
f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
_lowerCAmelCase = TF_MODEL_MAPPING[config.__class__](lowerCamelCase )
# encoder-decoder has vocab size saved differently
_lowerCAmelCase = config.vocab_size if hasattr(lowerCamelCase , """vocab_size""" ) else config.encoder.vocab_size
_lowerCAmelCase = random_input_ids(lowerCamelCase , lowerCamelCase , lowerCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(lowerCamelCase , decoder_input_ids=lowerCamelCase , training=lowerCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(lowerCamelCase , training=lowerCamelCase )
_lowerCAmelCase = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" )
if self.args.fpaa:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
_lowerCAmelCase = (
hasattr(lowerCamelCase , """architectures""" )
and isinstance(config.architectures , lowerCamelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_lowerCAmelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
_lowerCAmelCase = __import__("""transformers""" , fromlist=[model_class] )
_lowerCAmelCase = getattr(lowerCamelCase , lowerCamelCase )
_lowerCAmelCase = model_cls(lowerCamelCase )
except ImportError:
raise ImportError(
f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
_lowerCAmelCase = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](lowerCamelCase )
# encoder-decoder has vocab size saved differently
_lowerCAmelCase = config.vocab_size if hasattr(lowerCamelCase , """vocab_size""" ) else config.encoder.vocab_size
_lowerCAmelCase = random_input_ids(lowerCamelCase , lowerCamelCase , lowerCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
_lowerCAmelCase = model(lowerCamelCase , decoder_input_ids=lowerCamelCase , labels=lowerCamelCase , training=lowerCamelCase )[0]
_lowerCAmelCase = tf.gradients(lowerCamelCase , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
_lowerCAmelCase = model(lowerCamelCase , labels=lowerCamelCase , training=lowerCamelCase )[0]
_lowerCAmelCase = tf.gradients(lowerCamelCase , model.trainable_variables )
return gradients
_lowerCAmelCase = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def A__ (self , lowerCamelCase ):
'''simple docstring'''
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
                    # run an additional 5 warmup calls to stabilize compilation for tpu
logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" )
timeit.repeat(lowerCamelCase , repeat=1 , number=5 )
                # as noted in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, the min of the repeats should be taken rather than the average
_lowerCAmelCase = timeit.repeat(
lowerCamelCase , repeat=self.args.repeat , number=10 , )
return min(lowerCamelCase ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(f"""Doesn't fit on GPU. {e}""" )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
logger.info(
"""Note that TensorFlow allocates more memory than """
"""it might need to speed up computation. """
"""The memory reported here corresponds to the memory """
"""reported by `nvidia-smi`, which can vary depending """
"""on total available memory on the GPU that is used.""" )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"""`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"""
""" consumption line by line.""" )
_lowerCAmelCase = start_memory_tracing("""transformers""" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"""Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"""
""" with `args.memory=False`""" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"""py3nvml not installed, we won't log GPU memory usage. """
"""Install py3nvml (pip install py3nvml) to log information about GPU.""" )
_lowerCAmelCase = """N/A"""
else:
logger.info(
"""Measuring total GPU usage on GPU device. Make sure to not have additional processes"""
""" running on the same GPU.""" )
# init nvml
nvml.nvmlInit()
func()
_lowerCAmelCase = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
_lowerCAmelCase = nvml.nvmlDeviceGetMemoryInfo(lowerCamelCase )
_lowerCAmelCase = meminfo.used
_lowerCAmelCase = Memory(lowerCamelCase )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"""When enabling line by line tracing, the max peak memory for CPU is inaccurate in"""
""" TensorFlow.""" )
_lowerCAmelCase = None
else:
_lowerCAmelCase = measure_peak_memory_cpu(lowerCamelCase )
_lowerCAmelCase = Memory(lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else memory_bytes
if self.args.trace_memory_line_by_line:
_lowerCAmelCase = stop_memory_tracing(lowerCamelCase )
if memory is None:
_lowerCAmelCase = summary.total
else:
_lowerCAmelCase = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(f"""Doesn't fit on GPU. {e}""" )
return "N/A", None
| 317
| 1
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __lowerCamelCase ( unittest.TestCase ):
def A__ (self ):
'''simple docstring'''
super().tearDown()
gc.collect()
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloataa , )
_lowerCAmelCase = """A painting of a squirrel eating a burger"""
_lowerCAmelCase = jax.device_count()
_lowerCAmelCase = num_samples * [prompt]
_lowerCAmelCase = sd_pipe.prepare_inputs(lowerCamelCase )
_lowerCAmelCase = replicate(lowerCamelCase )
_lowerCAmelCase = shard(lowerCamelCase )
_lowerCAmelCase = jax.random.PRNGKey(0 )
_lowerCAmelCase = jax.random.split(lowerCamelCase , jax.device_count() )
_lowerCAmelCase = sd_pipe(lowerCamelCase , lowerCamelCase , lowerCamelCase , num_inference_steps=25 , jit=lowerCamelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
_lowerCAmelCase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_lowerCAmelCase = images[0, 253:256, 253:256, -1]
_lowerCAmelCase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_lowerCAmelCase = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.4_5508, 0.4512] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = """stabilityai/stable-diffusion-2"""
_lowerCAmelCase , _lowerCAmelCase = FlaxDPMSolverMultistepScheduler.from_pretrained(lowerCamelCase , subfolder="""scheduler""" )
_lowerCAmelCase , _lowerCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
lowerCamelCase , scheduler=lowerCamelCase , revision="""bf16""" , dtype=jnp.bfloataa , )
_lowerCAmelCase = scheduler_params
_lowerCAmelCase = """A painting of a squirrel eating a burger"""
_lowerCAmelCase = jax.device_count()
_lowerCAmelCase = num_samples * [prompt]
_lowerCAmelCase = sd_pipe.prepare_inputs(lowerCamelCase )
_lowerCAmelCase = replicate(lowerCamelCase )
_lowerCAmelCase = shard(lowerCamelCase )
_lowerCAmelCase = jax.random.PRNGKey(0 )
_lowerCAmelCase = jax.random.split(lowerCamelCase , jax.device_count() )
_lowerCAmelCase = sd_pipe(lowerCamelCase , lowerCamelCase , lowerCamelCase , num_inference_steps=25 , jit=lowerCamelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
_lowerCAmelCase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_lowerCAmelCase = images[0, 253:256, 253:256, -1]
_lowerCAmelCase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_lowerCAmelCase = jnp.array([0.4336, 0.4_2969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 317
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Any = {
'''transfo-xl-wt103''': '''https://huggingface.co/transfo-xl-wt103/resolve/main/config.json''',
}
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = 'transfo-xl'
__UpperCamelCase = ['mems']
__UpperCamelCase = {
'n_token': 'vocab_size',
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__(self , lowerCamelCase=267_735 , lowerCamelCase=[20_000, 40_000, 200_000] , lowerCamelCase=1_024 , lowerCamelCase=1_024 , lowerCamelCase=16 , lowerCamelCase=64 , lowerCamelCase=4_096 , lowerCamelCase=4 , lowerCamelCase=False , lowerCamelCase=18 , lowerCamelCase=1_600 , lowerCamelCase=1_000 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=0 , lowerCamelCase=-1 , lowerCamelCase=True , lowerCamelCase=0.1 , lowerCamelCase=0.0 , lowerCamelCase=True , lowerCamelCase="normal" , lowerCamelCase=0.01 , lowerCamelCase=0.01 , lowerCamelCase=0.02 , lowerCamelCase=1e-5 , lowerCamelCase=0 , **lowerCamelCase , ):
'''simple docstring'''
_lowerCAmelCase = vocab_size
_lowerCAmelCase = []
self.cutoffs.extend(lowerCamelCase )
if proj_share_all_but_first:
_lowerCAmelCase = [False] + [True] * len(self.cutoffs )
else:
_lowerCAmelCase = [False] + [False] * len(self.cutoffs )
_lowerCAmelCase = d_model
_lowerCAmelCase = d_embed
_lowerCAmelCase = d_head
_lowerCAmelCase = d_inner
_lowerCAmelCase = div_val
_lowerCAmelCase = pre_lnorm
_lowerCAmelCase = n_layer
_lowerCAmelCase = n_head
_lowerCAmelCase = mem_len
_lowerCAmelCase = same_length
_lowerCAmelCase = attn_type
_lowerCAmelCase = clamp_len
_lowerCAmelCase = sample_softmax
_lowerCAmelCase = adaptive
_lowerCAmelCase = dropout
_lowerCAmelCase = dropatt
_lowerCAmelCase = untie_r
_lowerCAmelCase = init
_lowerCAmelCase = init_range
_lowerCAmelCase = proj_init_std
_lowerCAmelCase = init_std
_lowerCAmelCase = layer_norm_epsilon
super().__init__(eos_token_id=lowerCamelCase , **lowerCamelCase )
@property
def A__ (self ):
'''simple docstring'''
logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
def A__ (self , lowerCamelCase ):
'''simple docstring'''
raise NotImplementedError(
f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 317
| 1
|
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __UpperCAmelCase ( snake_case_ : List[Any] ) -> List[str]:
"""simple docstring"""
_lowerCAmelCase = [2, 2, 6, 2] if """tiny""" in model_name else [2, 2, 18, 2]
_lowerCAmelCase = True if """large""" in model_name or """huge""" in model_name else False
_lowerCAmelCase = True if """large""" in model_name or """huge""" in model_name else False
_lowerCAmelCase = True if """large""" in model_name or """huge""" in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
_lowerCAmelCase = [3, 3, 3, 3]
_lowerCAmelCase = [5, 5, 5, 5]
elif "fl4" in model_name:
_lowerCAmelCase = [4, 4, 4, 4]
_lowerCAmelCase = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
_lowerCAmelCase = [3, 3, 3, 3]
if "lrf" in model_name:
_lowerCAmelCase = [3, 3, 3, 3]
else:
_lowerCAmelCase = [2, 2, 2, 2]
if "tiny" in model_name:
_lowerCAmelCase = 96
elif "small" in model_name:
_lowerCAmelCase = 96
elif "base" in model_name:
_lowerCAmelCase = 128
elif "large" in model_name:
_lowerCAmelCase = 192
elif "xlarge" in model_name:
_lowerCAmelCase = 256
elif "huge" in model_name:
_lowerCAmelCase = 352
# set label information
_lowerCAmelCase = """huggingface/label-files"""
if "large" in model_name or "huge" in model_name:
_lowerCAmelCase = """imagenet-22k-id2label.json"""
else:
_lowerCAmelCase = """imagenet-1k-id2label.json"""
_lowerCAmelCase = json.load(open(hf_hub_download(snake_case_ , snake_case_ , repo_type="""dataset""" ) , """r""" ) )
_lowerCAmelCase = {int(snake_case_ ): v for k, v in idalabel.items()}
_lowerCAmelCase = {v: k for k, v in idalabel.items()}
_lowerCAmelCase = FocalNetConfig(
embed_dim=snake_case_ , depths=snake_case_ , focal_levels=snake_case_ , focal_windows=snake_case_ , use_conv_embed=snake_case_ , idalabel=snake_case_ , labelaid=snake_case_ , use_post_layernorm=snake_case_ , use_layerscale=snake_case_ , )
return config
def __UpperCAmelCase ( snake_case_ : Optional[int] ) -> Any:
"""simple docstring"""
if "patch_embed.proj" in name:
_lowerCAmelCase = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
_lowerCAmelCase = name.replace("""patch_embed.norm""" , """embeddings.norm""" )
if "layers" in name:
_lowerCAmelCase = """encoder.""" + name
if "encoder.layers" in name:
_lowerCAmelCase = name.replace("""encoder.layers""" , """encoder.stages""" )
if "downsample.proj" in name:
_lowerCAmelCase = name.replace("""downsample.proj""" , """downsample.projection""" )
if "blocks" in name:
_lowerCAmelCase = name.replace("""blocks""" , """layers""" )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
_lowerCAmelCase = name.replace("""modulation.f""" , """modulation.projection_in""" )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
_lowerCAmelCase = name.replace("""modulation.h""" , """modulation.projection_context""" )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
_lowerCAmelCase = name.replace("""modulation.proj""" , """modulation.projection_out""" )
if name == "norm.weight":
_lowerCAmelCase = """layernorm.weight"""
if name == "norm.bias":
_lowerCAmelCase = """layernorm.bias"""
if "head" in name:
_lowerCAmelCase = name.replace("""head""" , """classifier""" )
else:
_lowerCAmelCase = """focalnet.""" + name
return name
def __UpperCAmelCase ( snake_case_ : Any , snake_case_ : Tuple , snake_case_ : List[str]=False ) -> int:
"""simple docstring"""
_lowerCAmelCase = {
"""focalnet-tiny""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth""",
"""focalnet-tiny-lrf""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth""",
"""focalnet-small""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth""",
"""focalnet-small-lrf""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth""",
"""focalnet-base""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth""",
"""focalnet-base-lrf""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth""",
"""focalnet-large-lrf-fl3""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth""",
"""focalnet-large-lrf-fl4""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth""",
"""focalnet-xlarge-lrf-fl3""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth""",
"""focalnet-xlarge-lrf-fl4""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth""",
}
_lowerCAmelCase = model_name_to_url[model_name]
print("""Checkpoint URL: """ , snake_case_ )
_lowerCAmelCase = torch.hub.load_state_dict_from_url(snake_case_ , map_location="""cpu""" )["""model"""]
# rename keys
for key in state_dict.copy().keys():
_lowerCAmelCase = state_dict.pop(snake_case_ )
_lowerCAmelCase = val
_lowerCAmelCase = get_focalnet_config(snake_case_ )
_lowerCAmelCase = FocalNetForImageClassification(snake_case_ )
model.eval()
# load state dict
model.load_state_dict(snake_case_ )
# verify conversion
_lowerCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_lowerCAmelCase = BitImageProcessor(
do_resize=snake_case_ , size={"""shortest_edge""": 256} , resample=PILImageResampling.BILINEAR , do_center_crop=snake_case_ , crop_size=224 , do_normalize=snake_case_ , image_mean=snake_case_ , image_std=snake_case_ , )
_lowerCAmelCase = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw )
_lowerCAmelCase = processor(images=snake_case_ , return_tensors="""pt""" )
_lowerCAmelCase = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
] )
_lowerCAmelCase = image_transforms(snake_case_ ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , snake_case_ , atol=1e-4 )
_lowerCAmelCase = model(**snake_case_ )
_lowerCAmelCase = outputs.logits.argmax(-1 ).item()
print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] )
print("""First values of logits:""" , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
_lowerCAmelCase = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] )
elif model_name == "focalnet-tiny-lrf":
_lowerCAmelCase = torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] )
elif model_name == "focalnet-small":
_lowerCAmelCase = torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] )
elif model_name == "focalnet-small-lrf":
_lowerCAmelCase = torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] )
elif model_name == "focalnet-base":
_lowerCAmelCase = torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] )
elif model_name == "focalnet-base-lrf":
_lowerCAmelCase = torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] )
assert torch.allclose(outputs.logits[0, :3] , snake_case_ , atol=1e-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(snake_case_ )
processor.save_pretrained(snake_case_ )
if push_to_hub:
print(F"""Pushing model and processor of {model_name} to the hub...""" )
model.push_to_hub(F"""{model_name}""" )
processor.push_to_hub(F"""{model_name}""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''focalnet-tiny''',
type=str,
help='''Name of the FocalNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub.''',
)
SCREAMING_SNAKE_CASE : Tuple = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 317
|
"""simple docstring"""
import math
def __UpperCAmelCase ( snake_case_ : int ) -> list[int]:
"""simple docstring"""
_lowerCAmelCase = []
_lowerCAmelCase = 2
_lowerCAmelCase = int(math.sqrt(snake_case_ ) ) # Size of every segment
_lowerCAmelCase = [True] * (end + 1)
_lowerCAmelCase = []
while start <= end:
if temp[start] is True:
in_prime.append(snake_case_ )
for i in range(start * start , end + 1 , snake_case_ ):
_lowerCAmelCase = False
start += 1
prime += in_prime
_lowerCAmelCase = end + 1
_lowerCAmelCase = min(2 * end , snake_case_ )
while low <= n:
_lowerCAmelCase = [True] * (high - low + 1)
for each in in_prime:
_lowerCAmelCase = math.floor(low / each ) * each
if t < low:
t += each
for j in range(snake_case_ , high + 1 , snake_case_ ):
_lowerCAmelCase = False
for j in range(len(snake_case_ ) ):
if temp[j] is True:
prime.append(j + low )
_lowerCAmelCase = high + 1
_lowerCAmelCase = min(high + end , snake_case_ )
return prime
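# The sieve above works in two phases: it first sieves all primes up to
# sqrt(n), then marks multiples of those primes inside successive windows of
# width sqrt(n), so only O(sqrt(n)) booleans are live at a time instead of n.
# For n = 10**6, each segment is 1000 entries wide and the 168 base primes
# below 1000 are reused for every window.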
print(sieve(1_0**6))
| 317
| 1
|
"""simple docstring"""
from __future__ import annotations
import queue
class __lowerCamelCase :
def __init__(self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = data
_lowerCAmelCase = None
_lowerCAmelCase = None
def __UpperCAmelCase ( ) -> TreeNode:
"""simple docstring"""
print("""\n********Press N to stop entering at any point of time********\n""" )
_lowerCAmelCase = input("""Enter the value of the root node: """ ).strip().lower()
_lowerCAmelCase = queue.Queue()
_lowerCAmelCase = TreeNode(int(snake_case_ ) )
q.put(snake_case_ )
while not q.empty():
_lowerCAmelCase = q.get()
_lowerCAmelCase = F"""Enter the left node of {node_found.data}: """
_lowerCAmelCase = input(snake_case_ ).strip().lower() or """n"""
if check == "n":
return tree_node
_lowerCAmelCase = TreeNode(int(snake_case_ ) )
_lowerCAmelCase = left_node
q.put(snake_case_ )
_lowerCAmelCase = F"""Enter the right node of {node_found.data}: """
_lowerCAmelCase = input(snake_case_ ).strip().lower() or """n"""
if check == "n":
return tree_node
_lowerCAmelCase = TreeNode(int(snake_case_ ) )
_lowerCAmelCase = right_node
q.put(snake_case_ )
    raise RuntimeError("""unreachable: the input loop above always returns a tree""" )
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
print(node.data , end=""",""" )
pre_order(node.left )
pre_order(node.right )
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
in_order(node.left )
print(node.data , end=""",""" )
in_order(node.right )
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=""",""" )
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
_lowerCAmelCase = queue.Queue()
q.put(snake_case_ )
while not q.empty():
_lowerCAmelCase = q.get()
print(node_dequeued.data , end=""",""" )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
_lowerCAmelCase = queue.Queue()
q.put(snake_case_ )
while not q.empty():
_lowerCAmelCase = []
while not q.empty():
_lowerCAmelCase = q.get()
print(node_dequeued.data , end=""",""" )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(snake_case_ )
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
_lowerCAmelCase = []
_lowerCAmelCase = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=""",""" )
stack.append(snake_case_ )
_lowerCAmelCase = n.left
        # the inner while ends when the current node has no left child
_lowerCAmelCase = stack.pop()
# start to traverse its right child
_lowerCAmelCase = n.right
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
_lowerCAmelCase = []
_lowerCAmelCase = node
while n or stack:
while n:
stack.append(snake_case_ )
_lowerCAmelCase = n.left
_lowerCAmelCase = stack.pop()
print(n.data , end=""",""" )
_lowerCAmelCase = n.right
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
_lowerCAmelCase , _lowerCAmelCase = [], []
_lowerCAmelCase = node
stacka.append(snake_case_ )
while stacka: # to find the reversed order of post order, store it in stack2
_lowerCAmelCase = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(snake_case_ )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end=""",""" )
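# The two-stack trick above works because the first stack pops nodes in
# root-right-left order (right is pushed after left, so it pops first);
# pushing each popped node onto the second stack reverses that into
# left-right-root, which is exactly post order.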
def __UpperCAmelCase ( snake_case_ : str = "" , snake_case_ : int=50 , snake_case_ : Dict="*" ) -> str:
"""simple docstring"""
if not s:
return "\n" + width * char
_lowerCAmelCase , _lowerCAmelCase = divmod(width - len(snake_case_ ) - 2 , 2 )
return F"""{left * char} {s} {(left + extra) * char}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('''Binary Tree Traversals'''))
SCREAMING_SNAKE_CASE : TreeNode = build_tree()
print(prompt('''Pre Order Traversal'''))
pre_order(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal'''))
in_order(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal'''))
post_order(node)
print(prompt() + '''\n''')
print(prompt('''Level Order Traversal'''))
level_order(node)
print(prompt() + '''\n''')
print(prompt('''Actual Level Order Traversal'''))
level_order_actual(node)
print('''*''' * 5_0 + '''\n''')
print(prompt('''Pre Order Traversal - Iteration Version'''))
pre_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal - Iteration Version'''))
in_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal - Iteration Version'''))
post_order_iter(node)
print(prompt())
| 317
|
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
SCREAMING_SNAKE_CASE : Any = (7_2_0, 1_2_8_0) # Height, Width
SCREAMING_SNAKE_CASE : List[str] = (0.4, 0.6) # if height or width is lower than this scale, drop it.
SCREAMING_SNAKE_CASE : List[Any] = 1 / 1_0_0
SCREAMING_SNAKE_CASE : Optional[Any] = ''''''
SCREAMING_SNAKE_CASE : Dict = ''''''
SCREAMING_SNAKE_CASE : List[Any] = ''''''
SCREAMING_SNAKE_CASE : Dict = 2_5_0
def __UpperCAmelCase ( ) -> None:
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = get_dataset(snake_case_ , snake_case_ )
for index in range(snake_case_ ):
_lowerCAmelCase = random.sample(range(len(snake_case_ ) ) , 4 )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = update_image_and_anno(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , filter_scale=snake_case_ , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
_lowerCAmelCase = random_chars(32 )
_lowerCAmelCase = path.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
_lowerCAmelCase = F"""{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"""
cva.imwrite(F"""{file_root}.jpg""" , snake_case_ , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F"""Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}""" )
_lowerCAmelCase = []
for anno in new_annos:
_lowerCAmelCase = anno[3] - anno[1]
_lowerCAmelCase = anno[4] - anno[2]
_lowerCAmelCase = anno[1] + width / 2
_lowerCAmelCase = anno[2] + height / 2
_lowerCAmelCase = F"""{anno[0]} {x_center} {y_center} {width} {height}"""
annos_list.append(snake_case_ )
with open(F"""{file_root}.txt""" , """w""" ) as outfile:
outfile.write("""\n""".join(line for line in annos_list ) )
def __UpperCAmelCase ( snake_case_ : str , snake_case_ : str ) -> tuple[list, list]:
"""simple docstring"""
_lowerCAmelCase = []
_lowerCAmelCase = []
for label_file in glob.glob(os.path.join(snake_case_ , """*.txt""" ) ):
_lowerCAmelCase = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
with open(snake_case_ ) as in_file:
_lowerCAmelCase = in_file.readlines()
_lowerCAmelCase = os.path.join(snake_case_ , F"""{label_name}.jpg""" )
_lowerCAmelCase = []
for obj_list in obj_lists:
_lowerCAmelCase = obj_list.rstrip("""\n""" ).split(""" """ )
_lowerCAmelCase = float(obj[1] ) - float(obj[3] ) / 2
_lowerCAmelCase = float(obj[2] ) - float(obj[4] ) / 2
_lowerCAmelCase = float(obj[1] ) + float(obj[3] ) / 2
_lowerCAmelCase = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(snake_case_ )
labels.append(snake_case_ )
return img_paths, labels
def __UpperCAmelCase ( snake_case_ : list , snake_case_ : list , snake_case_ : list[int] , snake_case_ : tuple[int, int] , snake_case_ : tuple[float, float] , snake_case_ : float = 0.0 , ) -> tuple[list, list, str]:
"""simple docstring"""
_lowerCAmelCase = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
_lowerCAmelCase = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
_lowerCAmelCase = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
_lowerCAmelCase = int(scale_x * output_size[1] )
_lowerCAmelCase = int(scale_y * output_size[0] )
_lowerCAmelCase = []
_lowerCAmelCase = []
for i, index in enumerate(snake_case_ ):
_lowerCAmelCase = all_img_list[index]
path_list.append(snake_case_ )
_lowerCAmelCase = all_annos[index]
_lowerCAmelCase = cva.imread(snake_case_ )
if i == 0: # top-left
_lowerCAmelCase = cva.resize(snake_case_ , (divid_point_x, divid_point_y) )
_lowerCAmelCase = img
for bbox in img_annos:
_lowerCAmelCase = bbox[1] * scale_x
_lowerCAmelCase = bbox[2] * scale_y
_lowerCAmelCase = bbox[3] * scale_x
_lowerCAmelCase = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
_lowerCAmelCase = cva.resize(snake_case_ , (output_size[1] - divid_point_x, divid_point_y) )
_lowerCAmelCase = img
for bbox in img_annos:
_lowerCAmelCase = scale_x + bbox[1] * (1 - scale_x)
_lowerCAmelCase = bbox[2] * scale_y
_lowerCAmelCase = scale_x + bbox[3] * (1 - scale_x)
_lowerCAmelCase = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
_lowerCAmelCase = cva.resize(snake_case_ , (divid_point_x, output_size[0] - divid_point_y) )
_lowerCAmelCase = img
for bbox in img_annos:
_lowerCAmelCase = bbox[1] * scale_x
_lowerCAmelCase = scale_y + bbox[2] * (1 - scale_y)
_lowerCAmelCase = bbox[3] * scale_x
_lowerCAmelCase = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
_lowerCAmelCase = cva.resize(
snake_case_ , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
_lowerCAmelCase = img
for bbox in img_annos:
_lowerCAmelCase = scale_x + bbox[1] * (1 - scale_x)
_lowerCAmelCase = scale_y + bbox[2] * (1 - scale_y)
_lowerCAmelCase = scale_x + bbox[3] * (1 - scale_x)
_lowerCAmelCase = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
    # Remove bounding boxes smaller than the filter scale
if filter_scale > 0:
_lowerCAmelCase = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
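# Each quadrant above maps normalized bbox corners into its own region of the
# mosaic: the top-left image keeps x' = x * scale_x, the top-right maps
# x' = scale_x + x * (1 - scale_x), and the bottom row shifts y the same way,
# so every annotation stays normalized relative to the combined canvas.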
def __UpperCAmelCase ( snake_case_ : int ) -> str:
"""simple docstring"""
    assert number_char > 1, "The number of characters should be greater than 1"
_lowerCAmelCase = ascii_lowercase + digits
return "".join(random.choice(snake_case_ ) for _ in range(snake_case_ ) )
if __name__ == "__main__":
main()
print('''DONE ✅''')
| 317
| 1
|
"""simple docstring"""
import datasets
from .evaluate import evaluate
SCREAMING_SNAKE_CASE : Any = '''\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
'''
SCREAMING_SNAKE_CASE : Union[str, Any] = '''
This metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
'''
SCREAMING_SNAKE_CASE : Optional[Any] = '''
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the SQuAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]
>>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]
>>> squad_metric = datasets.load_metric("squad")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
def A__ (self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {"""id""": datasets.Value("""string""" ), """prediction_text""": datasets.Value("""string""" )},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , reference_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , )
def A__ (self , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
_lowerCAmelCase = [
{
"""paragraphs""": [
{
"""qas""": [
{
"""answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
"""id""": ref["""id"""],
}
for ref in references
]
}
]
}
]
score = evaluate(dataset=dataset , predictions=pred_dict )
return score
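The `exact_match` score above depends on the answer normalization performed by the official script in evaluate.py; a rough, illustrative sketch of that normalization (not the official implementation):

import re
import string

def normalize_answer(s: str) -> str:
    # lowercase, strip punctuation, drop English articles, collapse whitespace
    s = s.lower()
    s = "".join(ch for ch in s if ch not in set(string.punctuation))
    s = re.sub(r"\b(a|an|the)\b", " ", s)
    return " ".join(s.split())

def exact_match(prediction: str, ground_truth: str) -> bool:
    return normalize_answer(prediction) == normalize_answer(ground_truth)

print(exact_match("The 1976 teams", "1976 teams"))  # True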
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def __UpperCAmelCase ( snake_case_ : Optional[int] ) -> List[str]:
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(snake_case_ )
def __UpperCAmelCase ( terminalreporter : Union[str, Any] ) -> int:
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_terminal_summary_main
make_reports = terminalreporter.config.getoption("""--make-reports""" )
if make_reports:
pytest_terminal_summary_main(terminalreporter , id=make_reports )
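In an unobfuscated conftest.py these two functions are the pytest hooks `pytest_addoption` and `pytest_terminal_summary`; a minimal sketch of the pattern (hypothetical flag semantics):

def pytest_addoption(parser):
    # register a custom command-line flag for the test session
    parser.addoption("--make-reports", action="store", default=False, help="generate report files from this run")

def pytest_terminal_summary(terminalreporter):
    # read the flag back when pytest prints its final summary
    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        print(f"would write reports under id {make_reports!r}")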
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = '''Tobias Carryer'''
from time import time
class __lowerCamelCase :
def __init__(self , multiplier , increment , modulo , seed=int(time() ) ):  # noqa: B008
'''simple docstring'''
self.multiplier = multiplier
self.increment = increment
self.modulo = modulo
self.seed = seed
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = (self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
if __name__ == "__main__":
# Show the LCG in action.
SCREAMING_SNAKE_CASE : Dict = LinearCongruentialGenerator(1_6_6_4_5_2_5, 1_0_1_3_9_0_4_2_2_3, 2 << 3_1)
while True:
print(lcg.next_number())
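The generator implements the classic recurrence seed = (multiplier * seed + increment) % modulo; a readable, runnable sketch with the same parameters as the demo above (the deobfuscated names are assumptions):

class LinearCongruentialGenerator:
    def __init__(self, multiplier: int, increment: int, modulo: int, seed: int) -> None:
        self.multiplier, self.increment, self.modulo, self.seed = multiplier, increment, modulo, seed

    def next_number(self) -> int:
        # advance the state and return it
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed

lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31, seed=42)
print([lcg.next_number() for _ in range(3)])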
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
SCREAMING_SNAKE_CASE : Optional[Any] = {
'''Acehnese Arabic''': '''ace_Arab''',
'''Acehnese Latin''': '''ace_Latn''',
'''Mesopotamian Arabic''': '''acm_Arab''',
'''Ta\'izzi-Adeni Arabic''': '''acq_Arab''',
'''Tunisian Arabic''': '''aeb_Arab''',
'''Afrikaans''': '''afr_Latn''',
'''South Levantine Arabic''': '''ajp_Arab''',
'''Akan''': '''aka_Latn''',
'''Amharic''': '''amh_Ethi''',
'''North Levantine Arabic''': '''apc_Arab''',
'''Modern Standard Arabic''': '''arb_Arab''',
'''Modern Standard Arabic Romanized''': '''arb_Latn''',
'''Najdi Arabic''': '''ars_Arab''',
'''Moroccan Arabic''': '''ary_Arab''',
'''Egyptian Arabic''': '''arz_Arab''',
'''Assamese''': '''asm_Beng''',
'''Asturian''': '''ast_Latn''',
'''Awadhi''': '''awa_Deva''',
'''Central Aymara''': '''ayr_Latn''',
'''South Azerbaijani''': '''azb_Arab''',
'''North Azerbaijani''': '''azj_Latn''',
'''Bashkir''': '''bak_Cyrl''',
'''Bambara''': '''bam_Latn''',
'''Balinese''': '''ban_Latn''',
'''Belarusian''': '''bel_Cyrl''',
'''Bemba''': '''bem_Latn''',
'''Bengali''': '''ben_Beng''',
'''Bhojpuri''': '''bho_Deva''',
'''Banjar Arabic''': '''bjn_Arab''',
'''Banjar Latin''': '''bjn_Latn''',
'''Standard Tibetan''': '''bod_Tibt''',
'''Bosnian''': '''bos_Latn''',
'''Buginese''': '''bug_Latn''',
'''Bulgarian''': '''bul_Cyrl''',
'''Catalan''': '''cat_Latn''',
'''Cebuano''': '''ceb_Latn''',
'''Czech''': '''ces_Latn''',
'''Chokwe''': '''cjk_Latn''',
'''Central Kurdish''': '''ckb_Arab''',
'''Crimean Tatar''': '''crh_Latn''',
'''Welsh''': '''cym_Latn''',
'''Danish''': '''dan_Latn''',
'''German''': '''deu_Latn''',
'''Southwestern Dinka''': '''dik_Latn''',
'''Dyula''': '''dyu_Latn''',
'''Dzongkha''': '''dzo_Tibt''',
'''Greek''': '''ell_Grek''',
'''English''': '''eng_Latn''',
'''Esperanto''': '''epo_Latn''',
'''Estonian''': '''est_Latn''',
'''Basque''': '''eus_Latn''',
'''Ewe''': '''ewe_Latn''',
'''Faroese''': '''fao_Latn''',
'''Fijian''': '''fij_Latn''',
'''Finnish''': '''fin_Latn''',
'''Fon''': '''fon_Latn''',
'''French''': '''fra_Latn''',
'''Friulian''': '''fur_Latn''',
'''Nigerian Fulfulde''': '''fuv_Latn''',
'''Scottish Gaelic''': '''gla_Latn''',
'''Irish''': '''gle_Latn''',
'''Galician''': '''glg_Latn''',
'''Guarani''': '''grn_Latn''',
'''Gujarati''': '''guj_Gujr''',
'''Haitian Creole''': '''hat_Latn''',
'''Hausa''': '''hau_Latn''',
'''Hebrew''': '''heb_Hebr''',
'''Hindi''': '''hin_Deva''',
'''Chhattisgarhi''': '''hne_Deva''',
'''Croatian''': '''hrv_Latn''',
'''Hungarian''': '''hun_Latn''',
'''Armenian''': '''hye_Armn''',
'''Igbo''': '''ibo_Latn''',
'''Ilocano''': '''ilo_Latn''',
'''Indonesian''': '''ind_Latn''',
'''Icelandic''': '''isl_Latn''',
'''Italian''': '''ita_Latn''',
'''Javanese''': '''jav_Latn''',
'''Japanese''': '''jpn_Jpan''',
'''Kabyle''': '''kab_Latn''',
'''Jingpho''': '''kac_Latn''',
'''Kamba''': '''kam_Latn''',
'''Kannada''': '''kan_Knda''',
'''Kashmiri Arabic''': '''kas_Arab''',
'''Kashmiri Devanagari''': '''kas_Deva''',
'''Georgian''': '''kat_Geor''',
'''Central Kanuri Arabic''': '''knc_Arab''',
'''Central Kanuri Latin''': '''knc_Latn''',
'''Kazakh''': '''kaz_Cyrl''',
'''Kabiyè''': '''kbp_Latn''',
'''Kabuverdianu''': '''kea_Latn''',
'''Khmer''': '''khm_Khmr''',
'''Kikuyu''': '''kik_Latn''',
'''Kinyarwanda''': '''kin_Latn''',
'''Kyrgyz''': '''kir_Cyrl''',
'''Kimbundu''': '''kmb_Latn''',
'''Northern Kurdish''': '''kmr_Latn''',
'''Kikongo''': '''kon_Latn''',
'''Korean''': '''kor_Hang''',
'''Lao''': '''lao_Laoo''',
'''Ligurian''': '''lij_Latn''',
'''Limburgish''': '''lim_Latn''',
'''Lingala''': '''lin_Latn''',
'''Lithuanian''': '''lit_Latn''',
'''Lombard''': '''lmo_Latn''',
'''Latgalian''': '''ltg_Latn''',
'''Luxembourgish''': '''ltz_Latn''',
'''Luba-Kasai''': '''lua_Latn''',
'''Ganda''': '''lug_Latn''',
'''Luo''': '''luo_Latn''',
'''Mizo''': '''lus_Latn''',
'''Standard Latvian''': '''lvs_Latn''',
'''Magahi''': '''mag_Deva''',
'''Maithili''': '''mai_Deva''',
'''Malayalam''': '''mal_Mlym''',
'''Marathi''': '''mar_Deva''',
'''Minangkabau Arabic''': '''min_Arab''',
'''Minangkabau Latin''': '''min_Latn''',
'''Macedonian''': '''mkd_Cyrl''',
'''Plateau Malagasy''': '''plt_Latn''',
'''Maltese''': '''mlt_Latn''',
'''Meitei Bengali''': '''mni_Beng''',
'''Halh Mongolian''': '''khk_Cyrl''',
'''Mossi''': '''mos_Latn''',
'''Maori''': '''mri_Latn''',
'''Burmese''': '''mya_Mymr''',
'''Dutch''': '''nld_Latn''',
'''Norwegian Nynorsk''': '''nno_Latn''',
'''Norwegian Bokmål''': '''nob_Latn''',
'''Nepali''': '''npi_Deva''',
'''Northern Sotho''': '''nso_Latn''',
'''Nuer''': '''nus_Latn''',
'''Nyanja''': '''nya_Latn''',
'''Occitan''': '''oci_Latn''',
'''West Central Oromo''': '''gaz_Latn''',
'''Odia''': '''ory_Orya''',
'''Pangasinan''': '''pag_Latn''',
'''Eastern Panjabi''': '''pan_Guru''',
'''Papiamento''': '''pap_Latn''',
'''Western Persian''': '''pes_Arab''',
'''Polish''': '''pol_Latn''',
'''Portuguese''': '''por_Latn''',
'''Dari''': '''prs_Arab''',
'''Southern Pashto''': '''pbt_Arab''',
'''Ayacucho Quechua''': '''quy_Latn''',
'''Romanian''': '''ron_Latn''',
'''Rundi''': '''run_Latn''',
'''Russian''': '''rus_Cyrl''',
'''Sango''': '''sag_Latn''',
'''Sanskrit''': '''san_Deva''',
'''Santali''': '''sat_Olck''',
'''Sicilian''': '''scn_Latn''',
'''Shan''': '''shn_Mymr''',
'''Sinhala''': '''sin_Sinh''',
'''Slovak''': '''slk_Latn''',
'''Slovenian''': '''slv_Latn''',
'''Samoan''': '''smo_Latn''',
'''Shona''': '''sna_Latn''',
'''Sindhi''': '''snd_Arab''',
'''Somali''': '''som_Latn''',
'''Southern Sotho''': '''sot_Latn''',
'''Spanish''': '''spa_Latn''',
'''Tosk Albanian''': '''als_Latn''',
'''Sardinian''': '''srd_Latn''',
'''Serbian''': '''srp_Cyrl''',
'''Swati''': '''ssw_Latn''',
'''Sundanese''': '''sun_Latn''',
'''Swedish''': '''swe_Latn''',
'''Swahili''': '''swh_Latn''',
'''Silesian''': '''szl_Latn''',
'''Tamil''': '''tam_Taml''',
'''Tatar''': '''tat_Cyrl''',
'''Telugu''': '''tel_Telu''',
'''Tajik''': '''tgk_Cyrl''',
'''Tagalog''': '''tgl_Latn''',
'''Thai''': '''tha_Thai''',
'''Tigrinya''': '''tir_Ethi''',
'''Tamasheq Latin''': '''taq_Latn''',
'''Tamasheq Tifinagh''': '''taq_Tfng''',
'''Tok Pisin''': '''tpi_Latn''',
'''Tswana''': '''tsn_Latn''',
'''Tsonga''': '''tso_Latn''',
'''Turkmen''': '''tuk_Latn''',
'''Tumbuka''': '''tum_Latn''',
'''Turkish''': '''tur_Latn''',
'''Twi''': '''twi_Latn''',
'''Central Atlas Tamazight''': '''tzm_Tfng''',
'''Uyghur''': '''uig_Arab''',
'''Ukrainian''': '''ukr_Cyrl''',
'''Umbundu''': '''umb_Latn''',
'''Urdu''': '''urd_Arab''',
'''Northern Uzbek''': '''uzn_Latn''',
'''Venetian''': '''vec_Latn''',
'''Vietnamese''': '''vie_Latn''',
'''Waray''': '''war_Latn''',
'''Wolof''': '''wol_Latn''',
'''Xhosa''': '''xho_Latn''',
'''Eastern Yiddish''': '''ydd_Hebr''',
'''Yoruba''': '''yor_Latn''',
'''Yue Chinese''': '''yue_Hant''',
'''Chinese Simplified''': '''zho_Hans''',
'''Chinese Traditional''': '''zho_Hant''',
'''Standard Malay''': '''zsm_Latn''',
'''Zulu''': '''zul_Latn''',
}
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = 'facebook/nllb-200-distilled-600M'
__UpperCamelCase = (
'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
'which should be the desired output language. Both `src_lang` and `tgt_lang` are written in '
'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
)
__UpperCamelCase = 'translator'
__UpperCamelCase = AutoTokenizer
__UpperCamelCase = AutoModelForSeqaSeqLM
__UpperCamelCase = LANGUAGE_CODES
__UpperCamelCase = ['text', 'text', 'text']
__UpperCamelCase = ['text']
def A__ (self , text , src_lang , tgt_lang ):
'''simple docstring'''
if src_lang not in self.lang_to_code:
raise ValueError(f"""{src_lang} is not a supported language.""" )
if tgt_lang not in self.lang_to_code:
raise ValueError(f"""{tgt_lang} is not a supported language.""" )
src_lang = self.lang_to_code[src_lang]
tgt_lang = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
text , return_tensors="""pt""" , src_lang=src_lang , tgt_lang=tgt_lang )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
return self.model.generate(**lowerCamelCase )
def A__ (self , outputs ):
'''simple docstring'''
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=True )
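The same checkpoint can be used without the tool wrapper; a sketch following the usual transformers NLLB pattern (the exact helper for looking up the target-language token id varies between library versions):

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")

inputs = tokenizer("How are you today?", return_tensors="pt")
generated = model.generate(**inputs, forced_bos_token_id=tokenizer.convert_tokens_to_ids("fra_Latn"))
print(tokenizer.decode(generated[0], skip_special_tokens=True))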
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
SCREAMING_SNAKE_CASE : Any = (7_2_0, 1_2_8_0)  # Height, Width
SCREAMING_SNAKE_CASE : List[str] = (0.4, 0.6) # if height or width is lower than this scale, drop it.
SCREAMING_SNAKE_CASE : List[Any] = 1 / 1_0_0
SCREAMING_SNAKE_CASE : Optional[Any] = ''''''
SCREAMING_SNAKE_CASE : Dict = ''''''
SCREAMING_SNAKE_CASE : List[Any] = ''''''
SCREAMING_SNAKE_CASE : Dict = 2_5_0
def __UpperCAmelCase ( ) -> None:
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = get_dataset(snake_case_ , snake_case_ )
for index in range(snake_case_ ):
_lowerCAmelCase = random.sample(range(len(snake_case_ ) ) , 4 )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = update_image_and_anno(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , filter_scale=snake_case_ , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
_lowerCAmelCase = random_chars(32 )
_lowerCAmelCase = path.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
_lowerCAmelCase = F"""{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"""
cva.imwrite(F"""{file_root}.jpg""" , snake_case_ , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F"""Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}""" )
_lowerCAmelCase = []
for anno in new_annos:
_lowerCAmelCase = anno[3] - anno[1]
_lowerCAmelCase = anno[4] - anno[2]
_lowerCAmelCase = anno[1] + width / 2
_lowerCAmelCase = anno[2] + height / 2
_lowerCAmelCase = F"""{anno[0]} {x_center} {y_center} {width} {height}"""
annos_list.append(snake_case_ )
with open(F"""{file_root}.txt""" , """w""" ) as outfile:
outfile.write("""\n""".join(line for line in annos_list ) )
def __UpperCAmelCase ( label_dir : str , img_dir : str ) -> tuple[list, list]:
"""simple docstring"""
img_paths = []
labels = []
for label_file in glob.glob(os.path.join(label_dir , """*.txt""" ) ):
label_name = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
with open(label_file ) as in_file:
obj_lists = in_file.readlines()
img_path = os.path.join(img_dir , F"""{label_name}.jpg""" )
boxes = []
for obj_list in obj_lists:
obj = obj_list.rstrip("""\n""" ).split(""" """ )
xmin = float(obj[1] ) - float(obj[3] ) / 2
ymin = float(obj[2] ) - float(obj[4] ) / 2
xmax = float(obj[1] ) + float(obj[3] ) / 2
ymax = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(img_path )
labels.append(boxes )
return img_paths, labels
def __UpperCAmelCase ( all_img_list : list , all_annos : list , idxs : list[int] , output_size : tuple[int, int] , scale_range : tuple[float, float] , filter_scale : float = 0.0 , ) -> tuple[list, list, str]:
"""simple docstring"""
_lowerCAmelCase = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uint8 )
_lowerCAmelCase = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
_lowerCAmelCase = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
_lowerCAmelCase = int(scale_x * output_size[1] )
_lowerCAmelCase = int(scale_y * output_size[0] )
_lowerCAmelCase = []
_lowerCAmelCase = []
for i, index in enumerate(idxs ):
_lowerCAmelCase = all_img_list[index]
path_list.append(snake_case_ )
_lowerCAmelCase = all_annos[index]
_lowerCAmelCase = cva.imread(snake_case_ )
if i == 0: # top-left
_lowerCAmelCase = cva.resize(snake_case_ , (divid_point_x, divid_point_y) )
_lowerCAmelCase = img
for bbox in img_annos:
_lowerCAmelCase = bbox[1] * scale_x
_lowerCAmelCase = bbox[2] * scale_y
_lowerCAmelCase = bbox[3] * scale_x
_lowerCAmelCase = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
_lowerCAmelCase = cva.resize(snake_case_ , (output_size[1] - divid_point_x, divid_point_y) )
_lowerCAmelCase = img
for bbox in img_annos:
_lowerCAmelCase = scale_x + bbox[1] * (1 - scale_x)
_lowerCAmelCase = bbox[2] * scale_y
_lowerCAmelCase = scale_x + bbox[3] * (1 - scale_x)
_lowerCAmelCase = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
_lowerCAmelCase = cva.resize(snake_case_ , (divid_point_x, output_size[0] - divid_point_y) )
_lowerCAmelCase = img
for bbox in img_annos:
_lowerCAmelCase = bbox[1] * scale_x
_lowerCAmelCase = scale_y + bbox[2] * (1 - scale_y)
_lowerCAmelCase = bbox[3] * scale_x
_lowerCAmelCase = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
_lowerCAmelCase = cva.resize(
snake_case_ , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
_lowerCAmelCase = img
for bbox in img_annos:
_lowerCAmelCase = scale_x + bbox[1] * (1 - scale_x)
_lowerCAmelCase = scale_y + bbox[2] * (1 - scale_y)
_lowerCAmelCase = scale_x + bbox[3] * (1 - scale_x)
_lowerCAmelCase = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding boxes smaller than the filter scale
if filter_scale > 0:
_lowerCAmelCase = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def __UpperCAmelCase ( number_char : int ) -> str:
"""simple docstring"""
assert number_char > 1, "The number of characters should be greater than 1"
_lowerCAmelCase = ascii_lowercase + digits
return "".join(random.choice(_lowerCAmelCase ) for _ in range(number_char ) )
if __name__ == "__main__":
main()
print('''DONE ✅''')
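The script above converts YOLO center-format labels (x_center, y_center, width, height) to corner format and back; a compact sketch of those two conversions with normalized coordinates:

def yolo_to_corners(xc: float, yc: float, w: float, h: float) -> tuple[float, float, float, float]:
    # center format -> (xmin, ymin, xmax, ymax)
    return xc - w / 2, yc - h / 2, xc + w / 2, yc + h / 2

def corners_to_yolo(xmin: float, ymin: float, xmax: float, ymax: float) -> tuple[float, float, float, float]:
    # corner format -> (x_center, y_center, width, height)
    w, h = xmax - xmin, ymax - ymin
    return xmin + w / 2, ymin + h / 2, w, h

assert corners_to_yolo(*yolo_to_corners(0.5, 0.5, 0.2, 0.4)) == (0.5, 0.5, 0.2, 0.4)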
"""simple docstring"""
from math import isqrt
def __UpperCAmelCase ( max_number : int ) -> list[int]:
"""simple docstring"""
is_prime = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , max_number , i ):
is_prime[j] = False
return [i for i in range(2 , max_number ) if is_prime[i]]
def __UpperCAmelCase ( max_number : int = 10**8 ) -> int:
"""simple docstring"""
prime_numbers = calculate_prime_numbers(max_number // 2 )
semiprimes_count = 0
left = 0
right = len(prime_numbers ) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
if __name__ == "__main__":
print(F'{solution() = }')
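The two-pointer loop counts pairs of primes p <= q with p * q below the bound: for each left prime it slides right down until the product fits, and every q between the pointers then yields a semiprime. A brute-force cross-check for small bounds (illustrative only):

def count_semiprimes_naive(max_number: int) -> int:
    def is_prime(n: int) -> bool:
        return n > 1 and all(n % d for d in range(2, int(n**0.5) + 1))
    primes = [p for p in range(2, max_number) if is_prime(p)]
    # count ordered pairs p <= q whose product stays below the bound
    return sum(1 for i, p in enumerate(primes) for q in primes[i:] if p * q < max_number)

print(count_semiprimes_naive(30))  # 10 semiprimes below 30: 4, 6, 9, 10, 14, 15, 21, 22, 25, 26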
"""simple docstring"""
from __future__ import annotations
def __UpperCAmelCase ( snake_case_ : int = 4 ) -> list[list[int]]:
"""simple docstring"""
_lowerCAmelCase = abs(snake_case_ ) or 4
return [[1 + x + y * row_size for x in range(snake_case_ )] for y in range(snake_case_ )]
def __UpperCAmelCase ( snake_case_ : list[list[int]] ) -> list[list[int]]:
"""simple docstring"""
return reverse_row(transpose(snake_case_ ) )
# OR.. transpose(reverse_column(matrix))
def __UpperCAmelCase ( snake_case_ : list[list[int]] ) -> list[list[int]]:
"""simple docstring"""
return reverse_row(reverse_column(snake_case_ ) )
# OR.. reverse_column(reverse_row(matrix))
def __UpperCAmelCase ( snake_case_ : list[list[int]] ) -> list[list[int]]:
"""simple docstring"""
return reverse_column(transpose(snake_case_ ) )
# OR.. transpose(reverse_row(matrix))
def __UpperCAmelCase ( snake_case_ : list[list[int]] ) -> list[list[int]]:
"""simple docstring"""
_lowerCAmelCase = [list(snake_case_ ) for x in zip(*snake_case_ )]
return matrix
def __UpperCAmelCase ( snake_case_ : list[list[int]] ) -> list[list[int]]:
"""simple docstring"""
_lowerCAmelCase = matrix[::-1]
return matrix
def __UpperCAmelCase ( snake_case_ : list[list[int]] ) -> list[list[int]]:
"""simple docstring"""
_lowerCAmelCase = [x[::-1] for x in matrix]
return matrix
def __UpperCAmelCase ( snake_case_ : list[list[int]] ) -> None:
"""simple docstring"""
for i in matrix:
print(*snake_case_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Optional[int] = make_matrix()
print('''\norigin:\n''')
print_matrix(matrix)
print('''\nrotate 90 counterclockwise:\n''')
print_matrix(rotate_aa(matrix))
SCREAMING_SNAKE_CASE : Dict = make_matrix()
print('''\norigin:\n''')
print_matrix(matrix)
print('''\nrotate 180:\n''')
print_matrix(rotate_aaa(matrix))
SCREAMING_SNAKE_CASE : List[str] = make_matrix()
print('''\norigin:\n''')
print_matrix(matrix)
print('''\nrotate 270 counterclockwise:\n''')
print_matrix(rotate_aaa(matrix))
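All three rotations compose a transpose with row or column reversals; a compact sketch of the counterclockwise case, matching the demo's orientation:

def transpose(m: list[list[int]]) -> list[list[int]]:
    return [list(row) for row in zip(*m)]

def rotate_90_ccw(m: list[list[int]]) -> list[list[int]]:
    # transpose, then reverse the row order
    return transpose(m)[::-1]

def rotate_180(m: list[list[int]]) -> list[list[int]]:
    # reverse each row, then reverse the row order
    return [row[::-1] for row in m][::-1]

print(rotate_90_ccw([[1, 2], [3, 4]]))  # [[2, 4], [1, 3]]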
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = (
'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image. '
'It takes two arguments named `image` which should be the original image, and `label` which should be a text '
'describing the elements that should be identified in the segmentation mask. The tool returns the mask.'
)
__UpperCamelCase = 'CIDAS/clipseg-rd64-refined'
__UpperCamelCase = 'image_segmenter'
__UpperCamelCase = CLIPSegForImageSegmentation
__UpperCamelCase = ['image', 'text']
__UpperCamelCase = ['image']
def __init__(self , *args , **kwargs ):
'''simple docstring'''
requires_backends(self , ["""vision"""] )
super().__init__(*args , **kwargs )
def A__ (self , image , label ):
'''simple docstring'''
return self.pre_processor(text=[label] , images=[image] , padding=True , return_tensors="""pt""" )
def A__ (self , inputs ):
'''simple docstring'''
with torch.no_grad():
logits = self.model(**inputs ).logits
return logits
def A__ (self , outputs ):
'''simple docstring'''
array = outputs.cpu().detach().numpy()
array[array <= 0] = 0
array[array > 0] = 1
return Image.fromarray((array * 255).astype(np.uint8 ) )
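For reference, the same model can be driven directly; a sketch following the transformers CLIPSeg docs (the input file name is hypothetical, and the 0.5 threshold is an arbitrary choice):

import numpy as np
import torch
from PIL import Image
from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")

image = Image.open("cat.jpg")  # hypothetical input image
inputs = processor(text=["cat"], images=[image], padding=True, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
mask = Image.fromarray(((logits.sigmoid() > 0.5).numpy() * 255).astype(np.uint8))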
"""simple docstring"""
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
SCREAMING_SNAKE_CASE : List[str] = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
SCREAMING_SNAKE_CASE : List[str] = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F'{len(upper_files)} files contain uppercase characters:')
print('''\n'''.join(upper_files) + '''\n''')
SCREAMING_SNAKE_CASE : Optional[Any] = [file for file in filepaths if ''' ''' in file]
if space_files:
print(F'{len(space_files)} files contain space characters:')
print('''\n'''.join(space_files) + '''\n''')
SCREAMING_SNAKE_CASE : str = [file for file in filepaths if '''-''' in file]
if hyphen_files:
print(F'{len(hyphen_files)} files contain hyphen characters:')
print('''\n'''.join(hyphen_files) + '''\n''')
SCREAMING_SNAKE_CASE : Optional[int] = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F'{len(nodir_files)} files are not in a directory:')
print('''\n'''.join(nodir_files) + '''\n''')
SCREAMING_SNAKE_CASE : str = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
"""simple docstring"""
from __future__ import annotations
import queue
class __lowerCamelCase :
def __init__(self , data ):
'''simple docstring'''
self.data = data
self.left = None
self.right = None
def __UpperCAmelCase ( ) -> TreeNode:
"""simple docstring"""
print("""\n********Press N to stop entering at any point of time********\n""" )
_lowerCAmelCase = input("""Enter the value of the root node: """ ).strip().lower()
_lowerCAmelCase = queue.Queue()
_lowerCAmelCase = TreeNode(int(snake_case_ ) )
q.put(snake_case_ )
while not q.empty():
_lowerCAmelCase = q.get()
_lowerCAmelCase = F"""Enter the left node of {node_found.data}: """
_lowerCAmelCase = input(snake_case_ ).strip().lower() or """n"""
if check == "n":
return tree_node
_lowerCAmelCase = TreeNode(int(snake_case_ ) )
_lowerCAmelCase = left_node
q.put(snake_case_ )
_lowerCAmelCase = F"""Enter the right node of {node_found.data}: """
_lowerCAmelCase = input(snake_case_ ).strip().lower() or """n"""
if check == "n":
return tree_node
_lowerCAmelCase = TreeNode(int(snake_case_ ) )
_lowerCAmelCase = right_node
q.put(snake_case_ )
raise
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
print(node.data , end=""",""" )
pre_order(node.left )
pre_order(node.right )
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
in_order(node.left )
print(node.data , end=""",""" )
in_order(node.right )
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=""",""" )
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
_lowerCAmelCase = queue.Queue()
q.put(snake_case_ )
while not q.empty():
_lowerCAmelCase = q.get()
print(node_dequeued.data , end=""",""" )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
_lowerCAmelCase = queue.Queue()
q.put(snake_case_ )
while not q.empty():
_lowerCAmelCase = []
while not q.empty():
_lowerCAmelCase = q.get()
print(node_dequeued.data , end=""",""" )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(snake_case_ )
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
_lowerCAmelCase = []
_lowerCAmelCase = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=""",""" )
stack.append(snake_case_ )
_lowerCAmelCase = n.left
# end of while means current node doesn't have left child
_lowerCAmelCase = stack.pop()
# start to traverse its right child
_lowerCAmelCase = n.right
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
_lowerCAmelCase = []
_lowerCAmelCase = node
while n or stack:
while n:
stack.append(snake_case_ )
_lowerCAmelCase = n.left
_lowerCAmelCase = stack.pop()
print(n.data , end=""",""" )
_lowerCAmelCase = n.right
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
_lowerCAmelCase , _lowerCAmelCase = [], []
_lowerCAmelCase = node
stacka.append(snake_case_ )
while stacka: # to find the reversed order of post order, store it in stack2
_lowerCAmelCase = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(snake_case_ )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end=""",""" )
def __UpperCAmelCase ( snake_case_ : str = "" , snake_case_ : int=50 , snake_case_ : Dict="*" ) -> str:
"""simple docstring"""
if not s:
return "\n" + width * char
_lowerCAmelCase , _lowerCAmelCase = divmod(width - len(snake_case_ ) - 2 , 2 )
return F"""{left * char} {s} {(left + extra) * char}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('''Binary Tree Traversals'''))
SCREAMING_SNAKE_CASE : TreeNode = build_tree()
print(prompt('''Pre Order Traversal'''))
pre_order(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal'''))
in_order(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal'''))
post_order(node)
print(prompt() + '''\n''')
print(prompt('''Level Order Traversal'''))
level_order(node)
print(prompt() + '''\n''')
print(prompt('''Actual Level Order Traversal'''))
level_order_actual(node)
print('''*''' * 5_0 + '''\n''')
print(prompt('''Pre Order Traversal - Iteration Version'''))
pre_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal - Iteration Version'''))
in_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal - Iteration Version'''))
post_order_iter(node)
print(prompt())
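The queue-based level-order walk above, restated as a compact, self-contained function (illustrative; uses collections.deque rather than queue.Queue):

from collections import deque

def level_order_values(root) -> list:
    # visit nodes breadth-first, collecting their data field
    if root is None:
        return []
    out, q = [], deque([root])
    while q:
        node = q.popleft()
        out.append(node.data)
        if node.left:
            q.append(node.left)
        if node.right:
            q.append(node.right)
    return out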
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __lowerCamelCase :
__UpperCamelCase = 42
__UpperCamelCase = None
# Automatically constructed
__UpperCamelCase = "dict"
__UpperCamelCase = None
__UpperCamelCase = field(default='Translation' , init=__lowercase , repr=__lowercase )
def __call__(self ):
'''simple docstring'''
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def A__ (self ):
'''simple docstring'''
from .features import Value
return {k: Value("""string""" ) for k in sorted(self.languages )}
@dataclass
class __lowerCamelCase :
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
# Automatically constructed
__UpperCamelCase = "dict"
__UpperCamelCase = None
__UpperCamelCase = field(default='TranslationVariableLanguages' , init=__lowercase , repr=__lowercase )
def A__ (self ):
'''simple docstring'''
self.languages = sorted(set(self.languages ) ) if self.languages else None
self.num_languages = len(self.languages ) if self.languages else None
def __call__(self ):
'''simple docstring'''
return pa.struct({"""language""": pa.list_(pa.string() ), """translation""": pa.list_(pa.string() )} )
def A__ (self , translation_dict ):
'''simple docstring'''
lang_set = set(self.languages )
if self.languages and set(translation_dict ) - lang_set:
raise ValueError(
f"""Some languages in example ({", ".join(sorted(set(translation_dict ) - lang_set ) )}) are not in valid set ({", ".join(lang_set )}).""" )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
translation_tuples = []
for lang, text in translation_dict.items():
if isinstance(text , str ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
languages , translations = zip(*sorted(translation_tuples ) )
return {"language": languages, "translation": translations}
def A__ (self ):
'''simple docstring'''
from .features import Sequence, Value
return {
"language": Sequence(Value("""string""" ) ),
"translation": Sequence(Value("""string""" ) ),
}
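A sketch of how the Translation feature is attached to a dataset schema in practice (follows the datasets documentation; the example record is made up):

from datasets import Dataset, Features, Value
from datasets.features import Translation

features = Features({"id": Value("string"), "translation": Translation(languages=["en", "fr"])})
ds = Dataset.from_dict(
    {"id": ["0"], "translation": [{"en": "the cat", "fr": "le chat"}]},
    features=features,
)
print(ds[0]["translation"])  # {'en': 'the cat', 'fr': 'le chat'}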
"""simple docstring"""
from __future__ import annotations
class __lowerCamelCase :
def __init__(self , text , pattern ):
'''simple docstring'''
self.text, self.pattern = text, pattern
self.textLen, self.patLen = len(text ), len(pattern )
def A__ (self , char ):
'''simple docstring'''
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def A__ (self , current_pos ):
'''simple docstring'''
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def A__ (self ):
'''simple docstring'''
positions = []
for i in range(self.textLen - self.patLen + 1 ):
mismatch_index = self.mismatch_in_text(i )
if mismatch_index == -1:
positions.append(i )
else:
match_index = self.match_in_pattern(self.text[mismatch_index] )
i = (
mismatch_index - match_index
)  # shifting index lgtm [py/multiple-definition]
return positions
SCREAMING_SNAKE_CASE : Any = '''ABAABA'''
SCREAMING_SNAKE_CASE : Optional[int] = '''AB'''
SCREAMING_SNAKE_CASE : str = BoyerMooreSearch(text, pattern)
SCREAMING_SNAKE_CASE : Tuple = bms.bad_character_heuristic()
if len(positions) == 0:
print('''No match found''')
else:
print('''Pattern found in following positions: ''')
print(positions)
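The bad-character rule shifts the window by the distance from the mismatch position to that character's last occurrence earlier in the pattern; a compact standalone version with the shift clamped to at least one (illustrative):

def bad_character_search(text: str, pattern: str) -> list[int]:
    positions, m, n = [], len(pattern), len(text)
    i = 0
    while i <= n - m:
        j = m - 1
        while j >= 0 and text[i + j] == pattern[j]:  # scan the window right to left
            j -= 1
        if j < 0:
            positions.append(i)
            i += 1
        else:
            last = pattern.rfind(text[i + j], 0, j)  # -1 when the char is absent
            i += max(1, j - last)
    return positions

print(bad_character_search("ABAABA", "AB"))  # [0, 3]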