code stringlengths 81 54k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
from __future__ import annotations
import bisect
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 0 , lowerCAmelCase__ = -1 ):
'''simple docstring'''
if hi < 0:
lowercase = len(lowerCAmelCase__ )
while lo < hi:
lowercase = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
lowercase = mid + 1
else:
lowercase = mid
return lo
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 0 , lowerCAmelCase__ = -1 ):
'''simple docstring'''
if hi < 0:
lowercase = len(lowerCAmelCase__ )
while lo < hi:
lowercase = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
lowercase = mid + 1
else:
lowercase = mid
return lo
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 0 , lowerCAmelCase__ = -1 ):
'''simple docstring'''
sorted_collection.insert(bisect_left(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) , lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 0 , lowerCAmelCase__ = -1 ):
'''simple docstring'''
sorted_collection.insert(bisect_right(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) , lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
lowercase = 0
lowercase = len(lowerCAmelCase__ ) - 1
while left <= right:
lowercase = left + (right - left) // 2
lowercase = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
lowercase = midpoint - 1
else:
lowercase = midpoint + 1
return None
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
lowercase = bisect.bisect_left(lowerCAmelCase__ , lowerCAmelCase__ )
if index != len(lowerCAmelCase__ ) and sorted_collection[index] == item:
return index
return None
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
if right < left:
return None
lowercase = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , midpoint - 1 )
else:
return binary_search_by_recursion(lowerCAmelCase__ , lowerCAmelCase__ , midpoint + 1 , lowerCAmelCase__ )
if __name__ == "__main__":
lowercase__ :Union[str, Any] = input("Enter numbers separated by comma:\n").strip()
lowercase__ :List[str] = sorted(int(item) for item in user_input.split(","))
lowercase__ :Union[str, Any] = int(input("Enter a single number to be found in the list:\n"))
lowercase__ :int = binary_search(collection, target)
if result is None:
print(F'{target} was not found in {collection}.')
else:
print(F'{target} was found at position {result} in {collection}.')
| 633 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
lowercase__ :Any = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def UpperCamelCase ( lowerCAmelCase__=None ):
'''simple docstring'''
if subparsers is not None:
lowercase = subparsers.add_parser('''tpu-config''' , description=_description )
else:
lowercase = argparse.ArgumentParser('''Accelerate tpu-config command''' , description=_description )
# Core arguments
lowercase = parser.add_argument_group(
'''Config Arguments''' , '''Arguments that can be configured through `accelerate config`.''' )
config_args.add_argument(
'''--config_file''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , help='''Path to the config file to use for accelerate.''' , )
config_args.add_argument(
'''--tpu_name''' , default=lowerCAmelCase__ , help='''The name of the TPU to use. If not specified, will use the TPU specified in the config file.''' , )
config_args.add_argument(
'''--tpu_zone''' , default=lowerCAmelCase__ , help='''The zone of the TPU to use. If not specified, will use the zone specified in the config file.''' , )
lowercase = parser.add_argument_group('''TPU Arguments''' , '''Arguments for options ran inside the TPU.''' )
pod_args.add_argument(
'''--use_alpha''' , action='''store_true''' , help='''Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.''' , )
pod_args.add_argument(
'''--command_file''' , default=lowerCAmelCase__ , help='''The path to the file containing the commands to run on the pod on startup.''' , )
pod_args.add_argument(
'''--command''' , action='''append''' , nargs='''+''' , help='''A command to run on the pod. Can be passed multiple times.''' , )
pod_args.add_argument(
'''--install_accelerate''' , action='''store_true''' , help='''Whether to install accelerate on the pod. Defaults to False.''' , )
pod_args.add_argument(
'''--accelerate_version''' , default='''latest''' , help='''The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.''' , )
pod_args.add_argument(
'''--debug''' , action='''store_true''' , help='''If set, will print the command that would be run instead of running it.''' )
if subparsers is not None:
parser.set_defaults(func=lowerCAmelCase__ )
return parser
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(lowerCAmelCase__ ):
lowercase = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
lowercase = defaults.command_file
if not args.command and defaults.commands is not None:
lowercase = defaults.commands
if not args.tpu_name:
lowercase = defaults.tpu_name
if not args.tpu_zone:
lowercase = defaults.tpu_zone
if args.accelerate_version == "dev":
lowercase = '''git+https://github.com/huggingface/accelerate.git'''
elif args.accelerate_version == "latest":
lowercase = '''accelerate -U'''
elif isinstance(parse(args.accelerate_version ) , lowerCAmelCase__ ):
lowercase = f'accelerate=={args.accelerate_version}'
if not args.command_file and not args.command:
raise ValueError('''You must specify either a command file or a command to run on the pod.''' )
if args.command_file:
with open(args.command_file , '''r''' ) as f:
lowercase = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , lowerCAmelCase__ ):
lowercase = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
lowercase = ['''cd /usr/share''']
if args.install_accelerate:
new_cmd += [f'pip install {args.accelerate_version}']
new_cmd += args.command
lowercase = '''; '''.join(lowerCAmelCase__ )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
lowercase = ['''gcloud''']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(f'Running {" ".join(lowerCAmelCase__ )}' )
return
subprocess.run(lowerCAmelCase__ )
print('''Successfully setup pod.''' )
def UpperCamelCase ( ):
'''simple docstring'''
lowercase = tpu_command_parser()
lowercase = parser.parse_args()
tpu_command_launcher(lowerCAmelCase__ )
| 633 | 1 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class lowercase :
def __init__( self ,A__ ,A__=1_4 ,A__=7 ,A__=True ,A__=True ,A__=True ,A__=True ,A__=True ,A__=9_9 ,A__=3_2 ,A__=5 ,A__=4 ,A__=3_7 ,A__="gelu" ,A__=0.1 ,A__=0.1 ,A__=5_1_2 ,A__=1_6 ,A__=2 ,A__=0.02 ,A__=3 ,A__=4 ,A__=None ,):
lowercase = parent
lowercase = batch_size
lowercase = seq_length
lowercase = is_training
lowercase = use_token_type_ids
lowercase = use_input_mask
lowercase = use_labels
lowercase = use_mc_token_ids
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = type_vocab_size
lowercase = type_sequence_label_size
lowercase = initializer_range
lowercase = num_labels
lowercase = num_choices
lowercase = scope
lowercase = self.vocab_size - 1
def A__ ( self):
lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size)
lowercase = None
if self.use_input_mask:
lowercase = random_attention_mask([self.batch_size, self.seq_length])
lowercase = None
if self.use_token_type_ids:
lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size)
lowercase = None
if self.use_mc_token_ids:
lowercase = ids_tensor([self.batch_size, self.num_choices] ,self.seq_length)
lowercase = None
lowercase = None
lowercase = None
if self.use_labels:
lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size)
lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels)
lowercase = ids_tensor([self.batch_size] ,self.num_choices)
lowercase = self.get_config()
lowercase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2)
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def A__ ( self):
return CTRLConfig(
vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,)
def A__ ( self ,A__ ,A__ ,A__ ,A__ ,A__ ,*A__):
lowercase = CTRLModel(config=A__)
model.to(A__)
model.eval()
model(A__ ,token_type_ids=A__ ,head_mask=A__)
model(A__ ,token_type_ids=A__)
lowercase = model(A__)
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(len(result.past_key_values) ,config.n_layer)
def A__ ( self ,A__ ,A__ ,A__ ,A__ ,A__ ,*A__):
lowercase = CTRLLMHeadModel(A__)
model.to(A__)
model.eval()
lowercase = model(A__ ,token_type_ids=A__ ,labels=A__)
self.parent.assertEqual(result.loss.shape ,())
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size))
def A__ ( self):
lowercase = self.prepare_config_and_inputs()
(
(
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) ,
) = config_and_inputs
lowercase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask}
return config, inputs_dict
def A__ ( self ,A__ ,A__ ,A__ ,A__ ,*A__):
lowercase = self.num_labels
lowercase = CTRLForSequenceClassification(A__)
model.to(A__)
model.eval()
lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size)
lowercase = model(A__ ,token_type_ids=A__ ,labels=A__)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels))
@require_torch
class lowercase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
lowercase_ : Optional[int] =(CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
lowercase_ : List[Any] =(CTRLLMHeadModel,) if is_torch_available() else ()
lowercase_ : Union[str, Any] =(
{
'''feature-extraction''': CTRLModel,
'''text-classification''': CTRLForSequenceClassification,
'''text-generation''': CTRLLMHeadModel,
'''zero-shot''': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase_ : Optional[Any] =True
lowercase_ : List[str] =False
lowercase_ : Optional[int] =False
def A__ ( self ,A__ ,A__ ,A__ ,A__ ,A__):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def A__ ( self):
lowercase = CTRLModelTester(self)
lowercase = ConfigTester(self ,config_class=A__ ,n_embd=3_7)
def A__ ( self):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def A__ ( self):
self.config_tester.run_common_tests()
def A__ ( self):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*A__)
def A__ ( self):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*A__)
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
def A__ ( self):
pass
@slow
def A__ ( self):
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = CTRLModel.from_pretrained(A__)
self.assertIsNotNone(A__)
@unittest.skip('''The model doesn\'t support left padding''') # and it's not used enough to be worth fixing :)
def A__ ( self):
pass
@require_torch
class lowercase ( unittest.TestCase ):
def A__ ( self):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def A__ ( self):
lowercase = CTRLLMHeadModel.from_pretrained('''ctrl''')
model.to(A__)
lowercase = torch.tensor(
[[1_1_8_5_9, 0, 1_6_1_1, 8]] ,dtype=torch.long ,device=A__) # Legal the president is
lowercase = [
1_1_8_5_9,
0,
1_6_1_1,
8,
5,
1_5_0,
2_6_4_4_9,
2,
1_9,
3_4_8,
4_6_9,
3,
2_5_9_5,
4_8,
2_0_7_4_0,
2_4_6_5_3_3,
2_4_6_5_3_3,
1_9,
3_0,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
lowercase = model.generate(A__ ,do_sample=A__)
self.assertListEqual(output_ids[0].tolist() ,A__)
| 633 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
lowercase__ :Optional[Any] = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
lowercase__ :List[str] = 10
lowercase__ :Tuple = 256
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
if len(lowerCAmelCase__ ) < MIN_NUM_TOKENS:
return None
lowercase = MinHash(num_perm=lowerCAmelCase__ )
for token in set(lowerCAmelCase__ ):
min_hash.update(token.encode() )
return min_hash
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return {t for t in NON_ALPHA.split(lowerCAmelCase__ ) if len(t.strip() ) > 0}
class lowercase :
def __init__( self ,*,
A__ = 0.85 ,):
lowercase = duplication_jaccard_threshold
lowercase = NUM_PERM
lowercase = MinHashLSH(threshold=self._duplication_jaccard_threshold ,num_perm=self._num_perm)
lowercase = defaultdict(A__)
def A__ ( self ,A__ ,A__):
lowercase = self._index.query(A__)
if code_key in self._index.keys:
print(f'Duplicate key {code_key}')
return
self._index.insert(A__ ,A__)
if len(A__) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(A__)
break
else:
self._duplicate_clusters[close_duplicates[0]].add(A__)
def A__ ( self):
lowercase = []
for base, duplicates in self._duplicate_clusters.items():
lowercase = [base] + list(A__)
# reformat the cluster to be a list of dict
lowercase = [{'''base_index''': el[0], '''repo_name''': el[1], '''path''': el[2]} for el in cluster]
duplicate_clusters.append(A__)
return duplicate_clusters
def A__ ( self ,A__):
lowercase = self.get_duplicate_clusters()
with open(A__ ,'''w''') as f:
json.dump(A__ ,A__)
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase , lowercase = element
lowercase = get_min_hash([t for t in NON_ALPHA.split(data['''content'''] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(lowerCAmelCase__ , max_queue_size=1_0000 ) , chunksize=100 , ):
if data is not None:
yield data
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
lowercase = DuplicationIndex(duplication_jaccard_threshold=lowerCAmelCase__ )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(lowerCAmelCase__ ) ) , max_queue_size=100 ) ):
di.add(lowerCAmelCase__ , lowerCAmelCase__ )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
lowercase = get_tokens(lowerCAmelCase__ )
lowercase = get_tokens(lowerCAmelCase__ )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
lowercase__ :List[Any] = None
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
lowercase = []
for elementa in cluster:
lowercase = _shared_dataset[elementa['''base_index''']]['''content''']
for elementa in extremes:
lowercase = _shared_dataset[elementa['''base_index''']]['''content''']
if jaccard_similarity(lowerCAmelCase__ , lowerCAmelCase__ ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
lowercase = 1
extremes.append(lowerCAmelCase__ )
return extremes
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
global _shared_dataset
lowercase = dataset
lowercase = []
lowercase = partial(_find_cluster_extremes_shared , jaccard_threshold=lowerCAmelCase__ )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
lowerCAmelCase__ , lowerCAmelCase__ , ) , total=len(lowerCAmelCase__ ) , ):
extremes_list.append(lowerCAmelCase__ )
return extremes_list
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ = 0.85 ):
'''simple docstring'''
lowercase = make_duplicate_clusters(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase = {x['''base_index'''] for cluster in duplicate_clusters for x in cluster}
lowercase = {}
lowercase = find_extremes(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
for extremes in extremes_clusters:
for element in extremes:
lowercase = element
lowercase = duplicate_indices - set(extreme_dict.keys() )
lowercase = dataset.filter(lambda lowerCAmelCase__ , lowerCAmelCase__ : idx not in remove_indices , with_indices=lowerCAmelCase__ )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
lowercase = element['''base_index'''] in extreme_dict
if element["is_extreme"]:
lowercase = extreme_dict[element['''base_index''']]['''copies''']
print(f'Original dataset size: {len(lowerCAmelCase__ )}' )
print(f'Number of duplicate clusters: {len(lowerCAmelCase__ )}' )
print(f'Files in duplicate cluster: {len(lowerCAmelCase__ )}' )
print(f'Unique files in duplicate cluster: {len(lowerCAmelCase__ )}' )
print(f'Filtered dataset size: {len(lowerCAmelCase__ )}' )
return ds_filter, duplicate_clusters
| 633 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase__ :int = logging.get_logger(__name__)
lowercase__ :Optional[int] = {
"microsoft/table-transformer-detection": (
"https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
),
}
class lowercase ( SCREAMING_SNAKE_CASE__ ):
lowercase_ : List[Any] ='''table-transformer'''
lowercase_ : Any =['''past_key_values''']
lowercase_ : Tuple ={
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self ,A__=True ,A__=None ,A__=3 ,A__=1_0_0 ,A__=6 ,A__=2_0_4_8 ,A__=8 ,A__=6 ,A__=2_0_4_8 ,A__=8 ,A__=0.0 ,A__=0.0 ,A__=True ,A__="relu" ,A__=2_5_6 ,A__=0.1 ,A__=0.0 ,A__=0.0 ,A__=0.02 ,A__=1.0 ,A__=False ,A__="sine" ,A__="resnet50" ,A__=True ,A__=False ,A__=1 ,A__=5 ,A__=2 ,A__=1 ,A__=1 ,A__=5 ,A__=2 ,A__=0.1 ,**A__ ,):
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''')
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''')
lowercase = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''])
elif isinstance(A__ ,A__):
lowercase = backbone_config.get('''model_type''')
lowercase = CONFIG_MAPPING[backbone_model_type]
lowercase = config_class.from_dict(A__)
# set timm attributes to None
lowercase , lowercase , lowercase = None, None, None
lowercase = use_timm_backbone
lowercase = backbone_config
lowercase = num_channels
lowercase = num_queries
lowercase = d_model
lowercase = encoder_ffn_dim
lowercase = encoder_layers
lowercase = encoder_attention_heads
lowercase = decoder_ffn_dim
lowercase = decoder_layers
lowercase = decoder_attention_heads
lowercase = dropout
lowercase = attention_dropout
lowercase = activation_dropout
lowercase = activation_function
lowercase = init_std
lowercase = init_xavier_std
lowercase = encoder_layerdrop
lowercase = decoder_layerdrop
lowercase = encoder_layers
lowercase = auxiliary_loss
lowercase = position_embedding_type
lowercase = backbone
lowercase = use_pretrained_backbone
lowercase = dilation
# Hungarian matcher
lowercase = class_cost
lowercase = bbox_cost
lowercase = giou_cost
# Loss coefficients
lowercase = mask_loss_coefficient
lowercase = dice_loss_coefficient
lowercase = bbox_loss_coefficient
lowercase = giou_loss_coefficient
lowercase = eos_coefficient
super().__init__(is_encoder_decoder=A__ ,**A__)
@property
def A__ ( self):
return self.encoder_attention_heads
@property
def A__ ( self):
return self.d_model
class lowercase ( SCREAMING_SNAKE_CASE__ ):
lowercase_ : str =version.parse('''1.11''' )
@property
def A__ ( self):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
])
@property
def A__ ( self):
return 1E-5
@property
def A__ ( self):
return 1_2
| 633 |
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
lowercase__ :Union[str, Any] = logging.get_logger(__name__)
class lowercase ( SCREAMING_SNAKE_CASE__ ):
lowercase_ : Union[str, Any] =CLIPConfig
lowercase_ : str =['''CLIPEncoderLayer''']
def __init__( self ,A__):
super().__init__(A__)
lowercase = CLIPVisionModelWithProjection(config.vision_config)
lowercase = nn.Linear(config.vision_config.projection_dim ,1)
lowercase = nn.Linear(config.vision_config.projection_dim ,1)
@torch.no_grad()
def A__ ( self ,A__ ,A__ ,A__=0.5 ,A__=0.5):
lowercase = self.vision_model(A__)[0]
lowercase = self.p_head(A__)
lowercase = nsfw_detected.flatten()
lowercase = nsfw_detected > p_threshold
lowercase = nsfw_detected.tolist()
if any(A__):
logger.warning(
'''Potential NSFW content was detected in one or more images. A black image will be returned instead.'''
''' Try again with a different prompt and/or seed.''')
for idx, nsfw_detected_ in enumerate(A__):
if nsfw_detected_:
lowercase = np.zeros(images[idx].shape)
lowercase = self.w_head(A__)
lowercase = watermark_detected.flatten()
lowercase = watermark_detected > w_threshold
lowercase = watermark_detected.tolist()
if any(A__):
logger.warning(
'''Potential watermarked content was detected in one or more images. A black image will be returned instead.'''
''' Try again with a different prompt and/or seed.''')
for idx, watermark_detected_ in enumerate(A__):
if watermark_detected_:
lowercase = np.zeros(images[idx].shape)
return images, nsfw_detected, watermark_detected
| 633 | 1 |
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
lowercase__ :Union[str, Any] = pd.read_csv(
"https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
"position_salaries.csv"
)
lowercase__ :Optional[int] = dataset.iloc[:, 1:2].values
lowercase__ :List[Any] = dataset.iloc[:, 2].values
lowercase__ , lowercase__ , lowercase__ , lowercase__ :Union[str, Any] = train_test_split(X, y, test_size=0.2, random_state=0)
lowercase__ :Dict = PolynomialFeatures(degree=4)
lowercase__ :List[str] = poly_reg.fit_transform(X)
lowercase__ :Any = LinearRegression()
pol_reg.fit(X_poly, y)
def UpperCamelCase ( ):
'''simple docstring'''
plt.scatter(lowerCAmelCase__ , lowerCAmelCase__ , color='''red''' )
plt.plot(lowerCAmelCase__ , pol_reg.predict(poly_reg.fit_transform(lowerCAmelCase__ ) ) , color='''blue''' )
plt.title('''Truth or Bluff (Linear Regression)''' )
plt.xlabel('''Position level''' )
plt.ylabel('''Salary''' )
plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
| 633 |
class lowercase :
def __init__( self ,A__):
lowercase = val
lowercase = None
lowercase = None
def A__ ( self ,A__):
if self.val:
if val < self.val:
if self.left is None:
lowercase = Node(A__)
else:
self.left.insert(A__)
elif val > self.val:
if self.right is None:
lowercase = Node(A__)
else:
self.right.insert(A__)
else:
lowercase = val
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
# Recursive traversal
if root:
inorder(root.left , lowerCAmelCase__ )
res.append(root.val )
inorder(root.right , lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
# Build BST
if len(lowerCAmelCase__ ) == 0:
return arr
lowercase = Node(arr[0] )
for i in range(1 , len(lowerCAmelCase__ ) ):
root.insert(arr[i] )
# Traverse BST in order.
lowercase = []
inorder(lowerCAmelCase__ , lowerCAmelCase__ )
return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 633 | 1 |
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
lowercase__ :List[Any] = logging.get_logger(__name__)
class lowercase ( SCREAMING_SNAKE_CASE__ ):
def __init__( self ,*A__ ,**A__):
warnings.warn(
'''The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use OwlViTImageProcessor instead.''' ,A__ ,)
super().__init__(*A__ ,**A__)
| 633 |
import os
def UpperCamelCase ( lowerCAmelCase__ = "input.txt" ):
'''simple docstring'''
with open(os.path.join(os.path.dirname(lowerCAmelCase__ ) , lowerCAmelCase__ ) ) as input_file:
lowercase = [
[int(lowerCAmelCase__ ) for element in line.split(''',''' )]
for line in input_file.readlines()
]
lowercase = len(lowerCAmelCase__ )
lowercase = len(matrix[0] )
lowercase = [[-1 for _ in range(lowerCAmelCase__ )] for _ in range(lowerCAmelCase__ )]
for i in range(lowerCAmelCase__ ):
lowercase = matrix[i][0]
for j in range(1 , lowerCAmelCase__ ):
for i in range(lowerCAmelCase__ ):
lowercase = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , lowerCAmelCase__ ):
lowercase = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
lowercase = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(F'{solution() = }')
| 633 | 1 |
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class lowercase ( unittest.TestCase ):
    """Pipeline tests for the fill-mask task against tiny and full models, in
    both PyTorch and TensorFlow.

    NOTE(review): identifiers in this file look machine-mangled — every test
    method is named ``A__`` (so, at class-creation time, each later ``def A__``
    shadows the previous one and only the decorators differ) and every local is
    ``lowercase`` (so intermediate results are repeatedly overwritten, e.g. the
    pipeline bound at the top of a method is then referenced as ``unmasker``,
    which is unresolved as written). The comments below describe the apparent
    intent; confirm against the upstream transformers test suite.
    """

    # Model mappings consumed by the shared pipeline-test harness.
    lowercase_ : Dict =MODEL_FOR_MASKED_LM_MAPPING
    lowercase_ : Optional[int] =TF_MODEL_FOR_MASKED_LM_MAPPING

    # tearDown-style cleanup: free as much GPU memory as possible between tests.
    def A__ ( self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        if is_torch_available():
            import torch

            torch.cuda.empty_cache()

    # Small-model TF test: tiny distilroberta, exact top-k outputs pinned.
    @require_tf
    def A__ ( self):
        lowercase = pipeline(task='''fill-mask''' ,model='''sshleifer/tiny-distilroberta-base''' ,top_k=2 ,framework='''tf''')
        lowercase = unmasker('''My name is <mask>''')
        self.assertEqual(
            nested_simplify(A__ ,decimals=6) ,[
                {'''sequence''': '''My name is grouped''', '''score''': 2.1E-05, '''token''': 3_8_0_1_5, '''token_str''': ''' grouped'''},
                {'''sequence''': '''My name is accuser''', '''score''': 2.1E-05, '''token''': 2_5_5_0_6, '''token_str''': ''' accuser'''},
            ] ,)
        lowercase = unmasker('''The largest city in France is <mask>''')
        self.assertEqual(
            nested_simplify(A__ ,decimals=6) ,[
                {
                    '''sequence''': '''The largest city in France is grouped''',
                    '''score''': 2.1E-05,
                    '''token''': 3_8_0_1_5,
                    '''token_str''': ''' grouped''',
                },
                {
                    '''sequence''': '''The largest city in France is accuser''',
                    '''score''': 2.1E-05,
                    '''token''': 2_5_5_0_6,
                    '''token_str''': ''' accuser''',
                },
            ] ,)
        lowercase = unmasker('''My name is <mask>''' ,targets=[''' Patrick''', ''' Clara''', ''' Teven'''] ,top_k=3)
        self.assertEqual(
            nested_simplify(A__ ,decimals=6) ,[
                {'''sequence''': '''My name is Clara''', '''score''': 2E-05, '''token''': 1_3_6_0_6, '''token_str''': ''' Clara'''},
                {'''sequence''': '''My name is Patrick''', '''score''': 2E-05, '''token''': 3_4_9_9, '''token_str''': ''' Patrick'''},
                {'''sequence''': '''My name is Te''', '''score''': 1.9E-05, '''token''': 2_9_4_1, '''token_str''': ''' Te'''},
            ] ,)

    # Small-model PT test: same scenarios as the TF test plus a two-mask input.
    @require_torch
    def A__ ( self):
        lowercase = pipeline(task='''fill-mask''' ,model='''sshleifer/tiny-distilroberta-base''' ,top_k=2 ,framework='''pt''')
        lowercase = unmasker('''My name is <mask>''')
        self.assertEqual(
            nested_simplify(A__ ,decimals=6) ,[
                {'''sequence''': '''My name is Maul''', '''score''': 2.2E-05, '''token''': 3_5_6_7_6, '''token_str''': ''' Maul'''},
                {'''sequence''': '''My name isELS''', '''score''': 2.2E-05, '''token''': 1_6_4_1_6, '''token_str''': '''ELS'''},
            ] ,)
        lowercase = unmasker('''The largest city in France is <mask>''')
        self.assertEqual(
            nested_simplify(A__ ,decimals=6) ,[
                {
                    '''sequence''': '''The largest city in France is Maul''',
                    '''score''': 2.2E-05,
                    '''token''': 3_5_6_7_6,
                    '''token_str''': ''' Maul''',
                },
                {'''sequence''': '''The largest city in France isELS''', '''score''': 2.2E-05, '''token''': 1_6_4_1_6, '''token_str''': '''ELS'''},
            ] ,)
        lowercase = unmasker('''My name is <mask>''' ,targets=[''' Patrick''', ''' Clara''', ''' Teven'''] ,top_k=3)
        self.assertEqual(
            nested_simplify(A__ ,decimals=6) ,[
                {'''sequence''': '''My name is Patrick''', '''score''': 2.1E-05, '''token''': 3_4_9_9, '''token_str''': ''' Patrick'''},
                {'''sequence''': '''My name is Te''', '''score''': 2E-05, '''token''': 2_9_4_1, '''token_str''': ''' Te'''},
                {'''sequence''': '''My name is Clara''', '''score''': 2E-05, '''token''': 1_3_6_0_6, '''token_str''': ''' Clara'''},
            ] ,)
        # Two masks in one input -> one ranked list per mask position.
        lowercase = unmasker('''My name is <mask> <mask>''' ,top_k=2)
        self.assertEqual(
            nested_simplify(A__ ,decimals=6) ,[
                [
                    {
                        '''score''': 2.2E-05,
                        '''token''': 3_5_6_7_6,
                        '''token_str''': ''' Maul''',
                        '''sequence''': '''<s>My name is Maul<mask></s>''',
                    },
                    {'''score''': 2.2E-05, '''token''': 1_6_4_1_6, '''token_str''': '''ELS''', '''sequence''': '''<s>My name isELS<mask></s>'''},
                ],
                [
                    {
                        '''score''': 2.2E-05,
                        '''token''': 3_5_6_7_6,
                        '''token_str''': ''' Maul''',
                        '''sequence''': '''<s>My name is<mask> Maul</s>''',
                    },
                    {'''score''': 2.2E-05, '''token''': 1_6_4_1_6, '''token_str''': '''ELS''', '''sequence''': '''<s>My name is<mask>ELS</s>'''},
                ],
            ] ,)

    # fp16 smoke test on GPU: only checks the pipeline runs and postprocesses.
    @require_torch_gpu
    def A__ ( self):
        lowercase = pipeline('''fill-mask''' ,model='''hf-internal-testing/tiny-random-distilbert''' ,device=0 ,framework='''pt''')
        # convert model to fp16
        pipe.model.half()
        lowercase = pipe('''Paris is the [MASK] of France.''')
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(A__ ,A__)

    # Slow PT test against the full distilroberta-base checkpoint.
    @slow
    @require_torch
    def A__ ( self):
        lowercase = pipeline(task='''fill-mask''' ,model='''distilroberta-base''' ,top_k=2 ,framework='''pt''')
        self.run_large_test(A__)

    # Slow TF test against the full distilroberta-base checkpoint.
    @slow
    @require_tf
    def A__ ( self):
        lowercase = pipeline(task='''fill-mask''' ,model='''distilroberta-base''' ,top_k=2 ,framework='''tf''')
        self.run_large_test(A__)

    # Shared assertions for the slow tests: realistic completions and targets.
    def A__ ( self ,A__):
        lowercase = unmasker('''My name is <mask>''')
        self.assertEqual(
            nested_simplify(A__) ,[
                {'''sequence''': '''My name is John''', '''score''': 0.008, '''token''': 6_1_0, '''token_str''': ''' John'''},
                {'''sequence''': '''My name is Chris''', '''score''': 0.007, '''token''': 1_5_7_3, '''token_str''': ''' Chris'''},
            ] ,)
        lowercase = unmasker('''The largest city in France is <mask>''')
        self.assertEqual(
            nested_simplify(A__) ,[
                {
                    '''sequence''': '''The largest city in France is Paris''',
                    '''score''': 0.251,
                    '''token''': 2_2_0_1,
                    '''token_str''': ''' Paris''',
                },
                {
                    '''sequence''': '''The largest city in France is Lyon''',
                    '''score''': 0.214,
                    '''token''': 1_2_7_9_0,
                    '''token_str''': ''' Lyon''',
                },
            ] ,)
        lowercase = unmasker('''My name is <mask>''' ,targets=[''' Patrick''', ''' Clara''', ''' Teven'''] ,top_k=3)
        self.assertEqual(
            nested_simplify(A__) ,[
                {'''sequence''': '''My name is Patrick''', '''score''': 0.005, '''token''': 3_4_9_9, '''token_str''': ''' Patrick'''},
                {'''sequence''': '''My name is Clara''', '''score''': 0.000, '''token''': 1_3_6_0_6, '''token_str''': ''' Clara'''},
                {'''sequence''': '''My name is Te''', '''score''': 0.000, '''token''': 2_9_4_1, '''token_str''': ''' Te'''},
            ] ,)

    # Harness entry (PT): run the common pipeline test with no extra examples.
    @require_torch
    def A__ ( self):
        lowercase = pipeline(task='''fill-mask''' ,model='''sshleifer/tiny-distilroberta-base''' ,framework='''pt''')
        lowercase = None
        lowercase = None
        self.run_pipeline_test(A__ ,[])

    # Harness entry (TF): same as above with the TF framework.
    @require_tf
    def A__ ( self):
        lowercase = pipeline(task='''fill-mask''' ,model='''sshleifer/tiny-distilroberta-base''' ,framework='''tf''')
        lowercase = None
        lowercase = None
        self.run_pipeline_test(A__ ,[])

    # Build a FillMaskPipeline + example inputs for the common harness; skip
    # models whose tokenizer has no mask token.
    def A__ ( self ,A__ ,A__ ,A__):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest('''The provided tokenizer has no mask token, (probably reformer or wav2vec2)''')
        lowercase = FillMaskPipeline(model=A__ ,tokenizer=A__)
        lowercase = [
            f'This is another {tokenizer.mask_token} test',
        ]
        return fill_masker, examples

    # Main shape test: single string, batched list, two-sentence batch, and
    # error paths; then dispatches to the focused sub-tests below.
    def A__ ( self ,A__ ,A__):
        lowercase = fill_masker.tokenizer
        lowercase = fill_masker.model
        lowercase = fill_masker(
            f'This is a {tokenizer.mask_token}' ,)
        self.assertEqual(
            A__ ,[
                {'''sequence''': ANY(A__), '''score''': ANY(A__), '''token''': ANY(A__), '''token_str''': ANY(A__)},
                {'''sequence''': ANY(A__), '''score''': ANY(A__), '''token''': ANY(A__), '''token_str''': ANY(A__)},
                {'''sequence''': ANY(A__), '''score''': ANY(A__), '''token''': ANY(A__), '''token_str''': ANY(A__)},
                {'''sequence''': ANY(A__), '''score''': ANY(A__), '''token''': ANY(A__), '''token_str''': ANY(A__)},
                {'''sequence''': ANY(A__), '''score''': ANY(A__), '''token''': ANY(A__), '''token_str''': ANY(A__)},
            ] ,)
        lowercase = fill_masker([f'This is a {tokenizer.mask_token}'])
        self.assertEqual(
            A__ ,[
                {'''sequence''': ANY(A__), '''score''': ANY(A__), '''token''': ANY(A__), '''token_str''': ANY(A__)},
                {'''sequence''': ANY(A__), '''score''': ANY(A__), '''token''': ANY(A__), '''token_str''': ANY(A__)},
                {'''sequence''': ANY(A__), '''score''': ANY(A__), '''token''': ANY(A__), '''token_str''': ANY(A__)},
                {'''sequence''': ANY(A__), '''score''': ANY(A__), '''token''': ANY(A__), '''token_str''': ANY(A__)},
                {'''sequence''': ANY(A__), '''score''': ANY(A__), '''token''': ANY(A__), '''token_str''': ANY(A__)},
            ] ,)
        lowercase = fill_masker([f'This is a {tokenizer.mask_token}', f'Another {tokenizer.mask_token} great test.'])
        self.assertEqual(
            A__ ,[
                [
                    {'''sequence''': ANY(A__), '''score''': ANY(A__), '''token''': ANY(A__), '''token_str''': ANY(A__)},
                    {'''sequence''': ANY(A__), '''score''': ANY(A__), '''token''': ANY(A__), '''token_str''': ANY(A__)},
                    {'''sequence''': ANY(A__), '''score''': ANY(A__), '''token''': ANY(A__), '''token_str''': ANY(A__)},
                    {'''sequence''': ANY(A__), '''score''': ANY(A__), '''token''': ANY(A__), '''token_str''': ANY(A__)},
                    {'''sequence''': ANY(A__), '''score''': ANY(A__), '''token''': ANY(A__), '''token_str''': ANY(A__)},
                ],
                [
                    {'''sequence''': ANY(A__), '''score''': ANY(A__), '''token''': ANY(A__), '''token_str''': ANY(A__)},
                    {'''sequence''': ANY(A__), '''score''': ANY(A__), '''token''': ANY(A__), '''token_str''': ANY(A__)},
                    {'''sequence''': ANY(A__), '''score''': ANY(A__), '''token''': ANY(A__), '''token_str''': ANY(A__)},
                    {'''sequence''': ANY(A__), '''score''': ANY(A__), '''token''': ANY(A__), '''token_str''': ANY(A__)},
                    {'''sequence''': ANY(A__), '''score''': ANY(A__), '''token''': ANY(A__), '''token_str''': ANY(A__)},
                ],
            ] ,)
        with self.assertRaises(A__):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(A__):
            fill_masker('''This is''')
        self.run_test_top_k(A__ ,A__)
        self.run_test_targets(A__ ,A__)
        self.run_test_top_k_targets(A__ ,A__)
        self.fill_mask_with_duplicate_targets_and_top_k(A__ ,A__)
        self.fill_mask_with_multiple_masks(A__ ,A__)

    # `targets=` behavior: as a pipeline kwarg, as a call kwarg, score
    # equivalence between the two, and invalid-target error paths.
    def A__ ( self ,A__ ,A__):
        lowercase = tokenizer.get_vocab()
        lowercase = sorted(vocab.keys())[:2]
        # Pipeline argument
        lowercase = FillMaskPipeline(model=A__ ,tokenizer=A__ ,targets=A__)
        lowercase = fill_masker(f'This is a {tokenizer.mask_token}')
        self.assertEqual(
            A__ ,[
                {'''sequence''': ANY(A__), '''score''': ANY(A__), '''token''': ANY(A__), '''token_str''': ANY(A__)},
                {'''sequence''': ANY(A__), '''score''': ANY(A__), '''token''': ANY(A__), '''token_str''': ANY(A__)},
            ] ,)
        lowercase = {vocab[el] for el in targets}
        self.assertEqual({el['''token'''] for el in outputs} ,A__)
        lowercase = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el['''token_str'''] for el in outputs} ,set(A__))
        # Call argument
        lowercase = FillMaskPipeline(model=A__ ,tokenizer=A__)
        lowercase = fill_masker(f'This is a {tokenizer.mask_token}' ,targets=A__)
        self.assertEqual(
            A__ ,[
                {'''sequence''': ANY(A__), '''score''': ANY(A__), '''token''': ANY(A__), '''token_str''': ANY(A__)},
                {'''sequence''': ANY(A__), '''score''': ANY(A__), '''token''': ANY(A__), '''token_str''': ANY(A__)},
            ] ,)
        lowercase = {vocab[el] for el in targets}
        self.assertEqual({el['''token'''] for el in outputs} ,A__)
        lowercase = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el['''token_str'''] for el in outputs} ,set(A__))
        # Score equivalence
        lowercase = fill_masker(f'This is a {tokenizer.mask_token}' ,targets=A__)
        lowercase = [top_mask['''token_str'''] for top_mask in outputs]
        lowercase = [top_mask['''score'''] for top_mask in outputs]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(A__) == set(A__):
            lowercase = fill_masker(f'This is a {tokenizer.mask_token}' ,targets=A__)
            lowercase = [top_mask['''score'''] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(A__) ,nested_simplify(A__))
        # Raises with invalid
        with self.assertRaises(A__):
            lowercase = fill_masker(f'This is a {tokenizer.mask_token}' ,targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(A__):
                lowercase = fill_masker(f'This is a {tokenizer.mask_token}' ,targets=[''''''])
        with self.assertRaises(A__):
            lowercase = fill_masker(f'This is a {tokenizer.mask_token}' ,targets='''''')

    # `top_k=` behavior: as a pipeline kwarg vs. as a call kwarg must agree.
    def A__ ( self ,A__ ,A__):
        lowercase = FillMaskPipeline(model=A__ ,tokenizer=A__ ,top_k=2)
        lowercase = fill_masker(f'This is a {tokenizer.mask_token}')
        self.assertEqual(
            A__ ,[
                {'''sequence''': ANY(A__), '''score''': ANY(A__), '''token''': ANY(A__), '''token_str''': ANY(A__)},
                {'''sequence''': ANY(A__), '''score''': ANY(A__), '''token''': ANY(A__), '''token_str''': ANY(A__)},
            ] ,)
        lowercase = FillMaskPipeline(model=A__ ,tokenizer=A__)
        lowercase = fill_masker(f'This is a {tokenizer.mask_token}' ,top_k=2)
        self.assertEqual(
            A__ ,[
                {'''sequence''': ANY(A__), '''score''': ANY(A__), '''token''': ANY(A__), '''token_str''': ANY(A__)},
                {'''sequence''': ANY(A__), '''score''': ANY(A__), '''token''': ANY(A__), '''token_str''': ANY(A__)},
            ] ,)
        self.assertEqual(nested_simplify(A__) ,nested_simplify(A__))

    # top_k combined with targets: filtering differently must not change scores.
    # NOTE(review): the sort key `lambda A__: x["score"]` reads an undefined
    # `x` — the original presumably was `lambda x: x["score"]`; confirm upstream.
    def A__ ( self ,A__ ,A__):
        lowercase = tokenizer.get_vocab()
        lowercase = FillMaskPipeline(model=A__ ,tokenizer=A__)
        # top_k=2, ntargets=3
        lowercase = sorted(vocab.keys())[:3]
        lowercase = fill_masker(f'This is a {tokenizer.mask_token}' ,top_k=2 ,targets=A__)
        # If we use the most probably targets, and filter differently, we should still
        # have the same results
        lowercase = [el['''token_str'''] for el in sorted(A__ ,key=lambda A__: x["score"] ,reverse=A__)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(A__).issubset(A__):
            lowercase = fill_masker(f'This is a {tokenizer.mask_token}' ,top_k=3 ,targets=A__)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(A__) ,nested_simplify(A__))

    # Duplicate targets must be deduplicated before top_k is applied.
    def A__ ( self ,A__ ,A__):
        lowercase = FillMaskPipeline(model=A__ ,tokenizer=A__)
        lowercase = tokenizer.get_vocab()
        # String duplicates + id duplicates
        lowercase = sorted(vocab.keys())[:3]
        lowercase = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        lowercase = fill_masker(f'My name is {tokenizer.mask_token}' ,targets=A__ ,top_k=1_0)
        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(A__) ,3)

    # Multiple masks in one input -> one top_k list per mask position.
    def A__ ( self ,A__ ,A__):
        lowercase = FillMaskPipeline(model=A__ ,tokenizer=A__)
        lowercase = fill_masker(
            f'This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}' ,top_k=2)
        self.assertEqual(
            A__ ,[
                [
                    {'''sequence''': ANY(A__), '''score''': ANY(A__), '''token''': ANY(A__), '''token_str''': ANY(A__)},
                    {'''sequence''': ANY(A__), '''score''': ANY(A__), '''token''': ANY(A__), '''token_str''': ANY(A__)},
                ],
                [
                    {'''sequence''': ANY(A__), '''score''': ANY(A__), '''token''': ANY(A__), '''token_str''': ANY(A__)},
                    {'''sequence''': ANY(A__), '''score''': ANY(A__), '''token''': ANY(A__), '''token_str''': ANY(A__)},
                ],
                [
                    {'''sequence''': ANY(A__), '''score''': ANY(A__), '''token''': ANY(A__), '''token_str''': ANY(A__)},
                    {'''sequence''': ANY(A__), '''score''': ANY(A__), '''token''': ANY(A__), '''token_str''': ANY(A__)},
                ],
            ] ,)
| 633 |
from __future__ import annotations
def UpperCamelCase ( lowerCAmelCase__ ):
    """Return True if the side lengths in ``lowerCAmelCase__`` can form a polygon.

    A polygon exists iff the longest side is strictly shorter than the sum of
    all the other sides (generalised triangle inequality). The input is not
    mutated.

    Raises:
        ValueError: if fewer than 2 values are given, or any value is <= 0.
    """
    if len(lowerCAmelCase__ ) < 2:
        raise ValueError('''Monogons and Digons are not polygons in the Euclidean space''' )
    # Original body referenced an undefined name `nums`; use the parameter.
    if any(i <= 0 for i in lowerCAmelCase__ ):
        raise ValueError('''All values must be greater than 0''' )
    # sorted() replaces the manual copy-then-sort and leaves the caller's list intact.
    ordered = sorted(lowerCAmelCase__ )
    return ordered[-1] < sum(ordered[:-1] )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 633 | 1 |
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    """Track the peak resident-set size (RSS) of the current process.

    A daemon thread polls ``psutil`` as fast as possible between ``start()``
    and ``stop()``; ``stop()`` returns the highest RSS observed, in bytes.

    NOTE(review): the source was machine-mangled — every attribute assignment
    went to a local ``lowercase`` and every method was named ``A__``. The
    names below are restored from the internal call sites (``self.process``,
    ``self.cpu_memory_peak``, ``self.peak_monitoring``, ``self.peak_monitor``,
    ``self.thread``) and the external ``.start()``/``.stop()`` callers; the
    class name matches the ``PeakCPUMemory()`` instantiation that follows.
    """

    def __init__( self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor( self):
        # Runs on the sampler thread; spins until stop() clears the flag.
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss ,self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start( self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        # Daemon thread so a forgotten stop() cannot block interpreter exit.
        self.thread.daemon = True
        self.thread.start()

    def stop( self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak
lowercase__ :List[Any] = PeakCPUMemory()
def UpperCamelCase ( ):
    """Snapshot time, CPU RSS and per-GPU allocated memory, and start peak tracking.

    Returns:
        dict with keys ``"time"`` (epoch seconds), ``"cpu"`` (RSS in bytes) and
        one entry per CUDA device index (stringified) with bytes allocated.

    NOTE(review): the original accumulated every measurement into a throwaway
    local and returned an undefined ``measures``; restored to build the dict
    the end-measure counterpart below indexes (``"time"``, ``"cpu"``, str(i)).
    """
    # Time
    measures = {'''time''': time.time()}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures['''cpu'''] = psutil.Process().memory_info().rss
    # cpu_peak_tracker: module-level PeakCPUMemory instance (defined above).
    cpu_peak_tracker.start()
    # GPU mem
    for i in range(torch.cuda.device_count() ):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()
    return measures
def UpperCamelCase ( lowerCAmelCase__ ):
    """Compute elapsed time and memory deltas relative to a start snapshot.

    Args:
        lowerCAmelCase__: the dict returned by the start-measure helper above.

    Returns:
        dict with ``"time"`` (seconds), ``"cpu"``/``"cpu-peak"`` (MiB) and, per
        CUDA device ``i``, ``str(i)`` and ``f"{i}-peak"`` deltas in MiB.

    NOTE(review): the original read an undefined ``start_measures`` (the
    parameter was mangled) and discarded every result; restored accordingly.
    """
    # Time
    measures = {'''time''': time.time() - lowerCAmelCase__['''time''']}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem (deltas converted from bytes to MiB via 2**20)
    measures['''cpu'''] = (psutil.Process().memory_info().rss - lowerCAmelCase__['''cpu''']) / 2**20
    measures['''cpu-peak'''] = (cpu_peak_tracker.stop() - lowerCAmelCase__['''cpu''']) / 2**20
    # GPU mem
    for i in range(torch.cuda.device_count() ):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - lowerCAmelCase__[str(i)]) / 2**20
        measures[f'{i}-peak'] = (torch.cuda.max_memory_allocated(i) - lowerCAmelCase__[str(i)]) / 2**20
    return measures
def UpperCamelCase ( measures , description ):
    """Pretty-print a measures dict produced by the end-measure helper.

    Args:
        measures: dict with ``"time"``, ``"cpu"``, ``"cpu-peak"`` and per-GPU
            ``str(i)`` / ``f"{i}-peak"`` entries (MiB / seconds).
        description: label printed as the report header.

    NOTE(review): the original signature had two parameters with the same
    mangled name (a SyntaxError); names restored from the body, which already
    read ``measures`` and ``description``.
    """
    print(f'{description}:' )
    print(f'- Time: {measures["time"]:.2f}s' )
    for i in range(torch.cuda.device_count() ):
        # Original indexed with an undefined name instead of the loop index.
        print(f'- GPU {i} allocated: {measures[str(i)]:.2f}MiB' )
        peak = measures[f'{i}-peak']
        print(f'- GPU {i} peak: {peak:.2f}MiB' )
    print(f'- CPU RAM allocated: {measures["cpu"]:.2f}MiB' )
    print(f'- CPU RAM peak: {measures["cpu-peak"]:.2f}MiB' )
| 633 |
def UpperCamelCase ( input_a , input_b ):
    """Logical AND gate: return 1 iff both inputs are non-zero, else 0.

    NOTE(review): the original signature repeated the same mangled parameter
    name twice (a SyntaxError) and the body counted one input twice; restored
    to the obvious two-input form.
    """
    # A tuple containing any 0 means at least one input is "off".
    return int((input_a, input_b).count(0 ) == 0 )
def UpperCamelCase ( ):
    """Exercise the AND-gate truth table.

    NOTE(review): ``and_gate`` is not defined anywhere in view — the gate
    function above was renamed (to ``UpperCamelCase``) by whatever mangled this
    file, so these asserts raise NameError as written; confirm the intended
    target before running.
    """
    assert and_gate(0 , 0 ) == 0
    assert and_gate(0 , 1 ) == 0
    assert and_gate(1 , 0 ) == 0
    assert and_gate(1 , 1 ) == 1


if __name__ == "__main__":
    # NOTE(review): `test_and_gate` and `and_gate` are likewise unresolved here.
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
| 633 | 1 |
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
lowercase__ :Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
lowercase__ :Dict = 256
class lowercase ( SCREAMING_SNAKE_CASE__ ):
    """Spectrogram-diffusion pipeline: encodes note/continuous inputs, runs a
    DDPM denoising loop over mel-spectrogram chunks, and optionally vocodes
    the result with MelGAN.

    NOTE(review): this class looks machine-mangled — the base-class name
    ``SCREAMING_SNAKE_CASE__`` is unresolved in this view (presumably
    ``DiffusionPipeline``, imported above), several ``def``s repeat the same
    parameter name ``A__`` (a SyntaxError), and locals are all assigned to
    ``lowercase`` while later lines read the intended names (``features``,
    ``zero_one``, ``timesteps``, ``logits``, ...). Comments below describe the
    apparent intent; confirm against the upstream diffusers source.
    """

    # Components that may legitimately be absent (melgan is optional).
    lowercase_ : str =['''melgan''']

    # Registers the five sub-models and sets the mel scaling constants
    # (min/max clip range and feature dimensionality).
    def __init__( self ,A__ ,A__ ,A__ ,A__ ,A__ ,):
        super().__init__()

        # From MELGAN
        lowercase = math.log(1E-5) # Matches MelGAN training.
        lowercase = 4.0 # Largest value for most examples
        lowercase = 1_2_8

        self.register_modules(
            notes_encoder=A__ ,continuous_encoder=A__ ,decoder=A__ ,scheduler=A__ ,melgan=A__ ,)

    # Linearly map raw mel features from [min_value, max_value] to output_range.
    def A__ ( self ,A__ ,A__=(-1.0, 1.0) ,A__=False):
        lowercase , lowercase = output_range
        if clip:
            lowercase = torch.clip(A__ ,self.min_value ,self.max_value)
        # Scale to [0, 1].
        lowercase = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    # Inverse of the above: map model outputs back into [min_value, max_value].
    def A__ ( self ,A__ ,A__=(-1.0, 1.0) ,A__=False):
        lowercase , lowercase = input_range
        lowercase = torch.clip(A__ ,A__ ,A__) if clip else outputs
        # Scale to [0, 1].
        lowercase = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value

    # Run both encoders; returns [(tokens_encoded, tokens_mask),
    # (continuous_encoded, continuous_mask)] for the decoder.
    def A__ ( self ,A__ ,A__ ,A__):
        lowercase = input_tokens > 0
        lowercase , lowercase = self.notes_encoder(
            encoder_input_tokens=A__ ,encoder_inputs_mask=A__)
        lowercase , lowercase = self.continuous_encoder(
            encoder_inputs=A__ ,encoder_inputs_mask=A__)
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    # One decoder forward pass at a given (normalised) noise time.
    def A__ ( self ,A__ ,A__ ,A__):
        lowercase = noise_time
        if not torch.is_tensor(A__):
            lowercase = torch.tensor([timesteps] ,dtype=torch.long ,device=input_tokens.device)
        elif torch.is_tensor(A__) and len(timesteps.shape) == 0:
            lowercase = timesteps[None].to(input_tokens.device)
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        lowercase = timesteps * torch.ones(input_tokens.shape[0] ,dtype=timesteps.dtype ,device=timesteps.device)
        lowercase = self.decoder(
            encodings_and_masks=A__ ,decoder_input_tokens=A__ ,decoder_noise_time=A__)
        return logits

    # Full generation loop: iterate over token chunks, feeding each chunk the
    # previous chunk's predicted mel as continuous context, denoise with the
    # scheduler, and concatenate the predicted mel segments. Output is either
    # the mel ("mel") or MelGAN-vocoded audio ("numpy").
    @torch.no_grad()
    def __call__( self ,A__ ,A__ = None ,A__ = 1_0_0 ,A__ = True ,A__ = "numpy" ,A__ = None ,A__ = 1 ,):
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(A__ ,A__) or callback_steps <= 0)
        ):
            raise ValueError(
                f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
                f' {type(A__)}.')

        lowercase = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] ,dtype=np.floataa)
        lowercase = np.zeros([1, 0, self.n_dims] ,np.floataa)
        lowercase = torch.ones((1, TARGET_FEATURE_LENGTH) ,dtype=A__ ,device=self.device)
        for i, encoder_input_tokens in enumerate(A__):
            if i == 0:
                lowercase = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device ,dtype=self.decoder.dtype)
                # The first chunk has no previous context.
                lowercase = torch.zeros((1, TARGET_FEATURE_LENGTH) ,dtype=A__ ,device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                lowercase = ones
            lowercase = self.scale_features(
                A__ ,output_range=[-1.0, 1.0] ,clip=A__)
            lowercase = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device) ,continuous_inputs=A__ ,continuous_mask=A__ ,)
            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            lowercase = randn_tensor(
                shape=encoder_continuous_inputs.shape ,generator=A__ ,device=self.device ,dtype=self.decoder.dtype ,)
            # set step values
            self.scheduler.set_timesteps(A__)
            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                lowercase = self.decode(
                    encodings_and_masks=A__ ,input_tokens=A__ ,noise_time=t / self.scheduler.config.num_train_timesteps ,)
                # Compute previous output: x_t -> x_t-1
                lowercase = self.scheduler.step(A__ ,A__ ,A__ ,generator=A__).prev_sample
            lowercase = self.scale_to_features(A__ ,input_range=[-1.0, 1.0])
            lowercase = mel[:1]
            lowercase = mel.cpu().float().numpy()
            lowercase = np.concatenate([full_pred_mel, pred_mel[:1]] ,axis=1)
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(A__ ,A__)
            logger.info('''Generated segment''' ,A__)
        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                '''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''')
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                '''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''')
        if output_type == "numpy":
            lowercase = self.melgan(input_features=full_pred_mel.astype(np.floataa))
        else:
            lowercase = full_pred_mel
        if not return_dict:
            return (output,)
        return AudioPipelineOutput(audios=A__)
| 633 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Lazy-import structure: maps submodule name -> list of public names it exports.
# NOTE(review): the original assigned this dict (and the modeling list, and the
# _LazyModule) to throwaway names while referencing `_import_structure` at the
# bottom; restored to the canonical transformers lazy-module pattern.
_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}

# Modeling classes are only exported when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see real imports; at runtime these are lazy.
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps import on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 633 | 1 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


# Lazy-import structure: maps submodule name -> list of public names it exports.
# NOTE(review): the original assigned every structure to throwaway names while
# the _LazyModule call at the bottom read `_import_structure`; restored to the
# canonical transformers lazy-module pattern.
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}

# Slow tokenizer requires sentencepiece.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]

# Fast tokenizer requires the tokenizers library.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]

# Modeling classes are only exported when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]

if TYPE_CHECKING:
    # Static type checkers see real imports; at runtime these are lazy.
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps import on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 633 |
import logging
from transformers import PretrainedConfig
lowercase__ :int = logging.getLogger(__name__)
lowercase__ :Dict = {
"bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}
class lowercase ( PretrainedConfig ):
    """Configuration for the BertAbs abstractive-summarization model.

    Holds vocabulary size, maximum positions, and the encoder/decoder
    transformer hyper-parameters (layers, hidden size, heads, feed-forward
    size, dropout).

    NOTE(review): the source was machine-mangled — the base class name
    ``SCREAMING_SNAKE_CASE__`` was unresolved (restored to ``PretrainedConfig``,
    the only config base imported in this file), ``__init__`` repeated the same
    parameter name for every argument (a SyntaxError), and every attribute was
    assigned to a local. Parameter/attribute names are restored from the
    original assignment order; defaults are unchanged.
    """

    # Model-type identifier used by the transformers config registry.
    model_type = '''bertabs'''

    def __init__(
        self,
        vocab_size=3_0_5_2_2,
        max_pos=5_1_2,
        enc_layers=6,
        enc_hidden_size=5_1_2,
        enc_heads=8,
        enc_ff_size=5_1_2,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=7_6_8,
        dec_heads=8,
        dec_ff_size=2_0_4_8,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos

        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout

        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 633 | 1 |
def UpperCamelCase ( start , finish ):
    """Greedy activity selection: print indices of a maximal set of
    non-overlapping activities, comma-separated.

    Args:
        start: start times, parallel to ``finish``.
        finish: finish times, assumed sorted in non-decreasing order.

    NOTE(review): the original signature repeated the same mangled parameter
    name twice (a SyntaxError) and printed/iterated over that name instead of
    the loop indices; restored from the body, which already read ``start``,
    ``finish``, ``i`` and ``j``.
    """
    n = len(finish )
    print('''The following activities are selected:''' )
    # The first activity is always selected
    i = 0
    print(i , end=''',''' )
    # Consider rest of the activities
    for j in range(n ):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j , end=''',''' )
            i = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): the demo originally assigned to throwaway names and called
    # an undefined `print_max_activities`; wired it to the function above.
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    UpperCamelCase(start, finish)
| 633 |
import warnings

from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline  # noqa F401


# Deprecation shim: importing this module re-exports the pipeline from
# `diffusers` (the noqa'd alias above) and warns callers to import it from
# there directly.
warnings.warn(
    "The `inpainting.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionInpaintPipeline` instead."
)
| 633 | 1 |
def UpperCamelCase ( lowerCAmelCase__ = 10 ):
    """Return the last ``lowerCAmelCase__`` digits of 28433 * 2**7830457 + 1
    as a string (Project Euler problem 97).

    Three-argument ``pow`` keeps the exponentiation modular and cheap.

    Raises:
        ValueError: if the digit count is not a non-negative int.

    NOTE(review): the original guard was mangled to ``isinstance(x, x)``
    against an undefined ``n``; restored to validate the parameter.
    """
    if not isinstance(lowerCAmelCase__ , int ) or lowerCAmelCase__ < 0:
        raise ValueError('''Invalid input''' )
    modulus = 10**lowerCAmelCase__
    number = 2_8433 * (pow(2 , 783_0457 , modulus )) + 1
    return str(number % modulus )


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # Original printed an undefined `solution`; wired to the function above.
    print(f'{UpperCamelCase(10) = }')
| 633 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
lowercase__ :Optional[Any] = "0.12" # assumed parallelism: 8
if is_torch_available():
import torch
def UpperCamelCase ( shape , vocab_size , rng=None ):
    """Create an int32 numpy array of ``shape`` filled with random token ids
    drawn uniformly from ``[0, vocab_size)``.

    Args:
        shape: iterable of dimension sizes for the output array.
        vocab_size: exclusive upper bound for the sampled ids.
        rng: optional ``random.Random`` for reproducibility; a fresh one is
            created when omitted.

    NOTE(review): the original signature repeated one mangled parameter name
    three times (a SyntaxError), never rebound ``rng`` when None, and discarded
    its accumulators; names restored from the body. ``jnp.intaa`` does not
    exist — this file's digit-mangling maps "32" -> "aa", so int32.
    """
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = [rng.randint(0 , vocab_size - 1 ) for _ in range(total_dims )]

    return np.array(values , dtype=jnp.int32 ).reshape(shape )
def UpperCamelCase ( shape , rng=None ):
    """Random 0/1 attention mask of ``shape`` whose last column is forced to 1
    so every batch row attends to at least one token.

    NOTE(review): the original signature duplicated one mangled parameter name
    (a SyntaxError) and assigned ``1`` to a throwaway local instead of the
    mask's last column; restored from the body and the standard transformers
    test helper of the same shape. ``ids_tensor`` is the sibling id-generator
    above — it is unresolved by that name in this mangled view; confirm the
    intended target.
    """
    attn_mask = ids_tensor(shape , vocab_size=2 , rng=rng )
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class lowercase :
lowercase_ : Any =None
lowercase_ : List[str] =()
def A__ ( self):
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
lowercase = 2
lowercase = inputs['''input_ids'''].shape[-1] // 2
lowercase = inputs['''input_ids'''][:max_batch_size, :sequence_length]
lowercase = jnp.ones_like(A__)
lowercase = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
lowercase = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
lowercase = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def A__ ( self):
lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
lowercase = False
lowercase = max_length
lowercase = 0
for model_class in self.all_generative_model_classes:
lowercase = model_class(A__)
lowercase = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowercase = getattr(A__ ,A__)
lowercase = pt_model_class(A__).eval()
lowercase = load_flax_weights_in_pytorch_model(A__ ,flax_model.params)
lowercase = flax_model.generate(A__).sequences
lowercase = pt_model.generate(torch.tensor(A__ ,dtype=torch.long))
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
lowercase = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() ,flax_generation_outputs.tolist())
def A__ ( self):
lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
lowercase = False
lowercase = max_length
for model_class in self.all_generative_model_classes:
lowercase = model_class(A__)
lowercase = model.generate(A__).sequences
self.assertEqual(generation_outputs.shape[-1] ,A__)
lowercase = jit(model.generate)
lowercase = jit_generate(A__).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
def A__ ( self):
lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
lowercase = True
lowercase = max_length
for model_class in self.all_generative_model_classes:
lowercase = model_class(A__)
lowercase = model.generate(A__).sequences
self.assertEqual(generation_outputs.shape[-1] ,A__)
lowercase = jit(model.generate)
lowercase = jit_generate(A__).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
def A__ ( self):
lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
lowercase = False
lowercase = max_length
lowercase = 2
for model_class in self.all_generative_model_classes:
lowercase = model_class(A__)
lowercase = model.generate(A__).sequences
self.assertEqual(generation_outputs.shape[-1] ,A__)
lowercase = jit(model.generate)
lowercase = jit_generate(A__).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
def A__ ( self):
lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
lowercase = False
lowercase = max_length
lowercase = 2
lowercase = 2
for model_class in self.all_generative_model_classes:
lowercase = model_class(A__)
lowercase = model.generate(A__).sequences
self.assertEqual(generation_outputs.shape[0] ,input_ids.shape[0] * config.num_return_sequences)
def A__ ( self):
lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
lowercase = True
lowercase = max_length
lowercase = 0.8
lowercase = 1_0
lowercase = 0.3
lowercase = 1
lowercase = 8
lowercase = 9
for model_class in self.all_generative_model_classes:
lowercase = model_class(A__)
lowercase = model.generate(A__).sequences
self.assertEqual(generation_outputs.shape[-1] ,A__)
lowercase = jit(model.generate)
lowercase = jit_generate(A__).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
def A__ ( self):
lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
lowercase = max_length
lowercase = 1
lowercase = 8
lowercase = 9
for model_class in self.all_generative_model_classes:
lowercase = model_class(A__)
lowercase = model.generate(A__).sequences
self.assertEqual(generation_outputs.shape[-1] ,A__)
lowercase = jit(model.generate)
lowercase = jit_generate(A__).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
def A__ ( self):
    # Same parity test as above with one extra override (the additional `2` —
    # name mangled away, possibly num_beams; TODO confirm against upstream).
    lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
    lowercase = max_length
    lowercase = 2
    lowercase = 1
    lowercase = 8
    lowercase = 9
    for model_class in self.all_generative_model_classes:
        lowercase = model_class(A__)
        lowercase = model.generate(A__).sequences
        self.assertEqual(generation_outputs.shape[-1] ,A__)
        lowercase = jit(model.generate)
        lowercase = jit_generate(A__).sequences
        self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
def A__ ( self):
    # Left-padded attention mask + greedy generation: zeroing position (0, 0)
    # of the mask simulates padding; eager and jit outputs must still match.
    lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
    # pad attention mask on the left
    lowercase = attention_mask.at[(0, 0)].set(0)
    lowercase = False
    lowercase = max_length
    for model_class in self.all_generative_model_classes:
        lowercase = model_class(A__)
        lowercase = model.generate(A__ ,attention_mask=A__).sequences
        self.assertEqual(generation_outputs.shape[-1] ,A__)
        lowercase = jit(model.generate)
        lowercase = jit_generate(A__ ,attention_mask=A__).sequences
        self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
def A__ ( self):
    # Left-padded attention mask + sampling mode (the boolean flag is True here,
    # vs False in the greedy variant above); checks eager/jit sequence equality.
    lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
    # pad attention mask on the left
    lowercase = attention_mask.at[(0, 0)].set(0)
    lowercase = True
    lowercase = max_length
    for model_class in self.all_generative_model_classes:
        lowercase = model_class(A__)
        lowercase = model.generate(A__ ,attention_mask=A__).sequences
        self.assertEqual(generation_outputs.shape[-1] ,A__)
        lowercase = jit(model.generate)
        lowercase = jit_generate(A__ ,attention_mask=A__).sequences
        self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
def A__ ( self):
    # Left-padded attention mask + an override of 2 (presumably num_beams —
    # name lost to mangling, TODO confirm); checks eager/jit sequence equality.
    lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
    # pad attention mask on the left
    lowercase = attention_mask.at[(0, 0)].set(0)
    lowercase = 2
    lowercase = max_length
    for model_class in self.all_generative_model_classes:
        lowercase = model_class(A__)
        lowercase = model.generate(A__ ,attention_mask=A__).sequences
        self.assertEqual(generation_outputs.shape[-1] ,A__)
        lowercase = jit(model.generate)
        lowercase = jit_generate(A__ ,attention_mask=A__).sequences
        self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
@require_flax
class lowercase ( unittest.TestCase ):
    """Integration test: `generate()` must reject unknown keyword arguments."""
    def A__ ( self):
        lowercase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-bert''')
        lowercase = FlaxAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''')
        lowercase = '''Hello world'''
        lowercase = tokenizer(A__ ,return_tensors='''np''').input_ids
        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(A__ ,'''do_samples'''):
            model.generate(A__ ,do_samples=A__)
        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(A__ ,'''foo'''):
            lowercase = {'''foo''': '''bar'''}
            model.generate(A__ ,**A__)
| 633 | 1 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
# Module-level logger and the doctest-style usage example injected into
# `__call__`'s docstring via `replace_example_docstring` below.
lowercase__ :Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
lowercase__ :Union[str, Any] = "\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n"
@dataclass
class lowercase ( SCREAMING_SNAKE_CASE__ ):
    # Output container for the Shap-E pipeline; the pipeline below instantiates
    # it as `ShapEPipelineOutput(images=...)`, so this field holds the rendered
    # images (PIL image or numpy array). NOTE(review): class/field names look
    # mangled relative to that call site — confirm against upstream diffusers.
    lowercase_ : Union[PIL.Image.Image, np.ndarray]
class lowercase ( SCREAMING_SNAKE_CASE__ ):
def __init__( self ,A__ ,A__ ,A__ ,A__ ,A__ ,):
super().__init__()
self.register_modules(
prior=A__ ,image_encoder=A__ ,image_processor=A__ ,scheduler=A__ ,renderer=A__ ,)
def A__ ( self ,A__ ,A__ ,A__ ,A__ ,A__ ,A__):
if latents is None:
lowercase = randn_tensor(A__ ,generator=A__ ,device=A__ ,dtype=A__)
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}')
lowercase = latents.to(A__)
lowercase = latents * scheduler.init_noise_sigma
return latents
def A__ ( self ,A__=0):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''')
lowercase = torch.device(f'cuda:{gpu_id}')
lowercase = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(A__ ,A__)
@property
def A__ ( self):
if self.device != torch.device('''meta''') or not hasattr(self.image_encoder ,'''_hf_hook'''):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(A__ ,'''_hf_hook''')
and hasattr(module._hf_hook ,'''execution_device''')
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device)
return self.device
def A__ ( self ,A__ ,A__ ,A__ ,A__ ,):
if isinstance(A__ ,A__) and isinstance(image[0] ,torch.Tensor):
lowercase = torch.cat(A__ ,axis=0) if image[0].ndim == 4 else torch.stack(A__ ,axis=0)
if not isinstance(A__ ,torch.Tensor):
lowercase = self.image_processor(A__ ,return_tensors='''pt''').pixel_values[0].unsqueeze(0)
lowercase = image.to(dtype=self.image_encoder.dtype ,device=A__)
lowercase = self.image_encoder(A__)['''last_hidden_state''']
lowercase = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
lowercase = image_embeds.repeat_interleave(A__ ,dim=0)
if do_classifier_free_guidance:
lowercase = torch.zeros_like(A__)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase = torch.cat([negative_image_embeds, image_embeds])
return image_embeds
@torch.no_grad()
@replace_example_docstring(A__)
def __call__( self ,A__ ,A__ = 1 ,A__ = 2_5 ,A__ = None ,A__ = None ,A__ = 4.0 ,A__ = 6_4 ,A__ = "pil" ,A__ = True ,):
if isinstance(A__ ,PIL.Image.Image):
lowercase = 1
elif isinstance(A__ ,torch.Tensor):
lowercase = image.shape[0]
elif isinstance(A__ ,A__) and isinstance(image[0] ,(torch.Tensor, PIL.Image.Image)):
lowercase = len(A__)
else:
raise ValueError(
f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(A__)}')
lowercase = self._execution_device
lowercase = batch_size * num_images_per_prompt
lowercase = guidance_scale > 1.0
lowercase = self._encode_image(A__ ,A__ ,A__ ,A__)
# prior
self.scheduler.set_timesteps(A__ ,device=A__)
lowercase = self.scheduler.timesteps
lowercase = self.prior.config.num_embeddings
lowercase = self.prior.config.embedding_dim
lowercase = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) ,image_embeds.dtype ,A__ ,A__ ,A__ ,self.scheduler ,)
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
lowercase = latents.reshape(latents.shape[0] ,A__ ,A__)
for i, t in enumerate(self.progress_bar(A__)):
# expand the latents if we are doing classifier free guidance
lowercase = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
lowercase = self.scheduler.scale_model_input(A__ ,A__)
lowercase = self.prior(
A__ ,timestep=A__ ,proj_embedding=A__ ,).predicted_image_embedding
# remove the variance
lowercase , lowercase = noise_pred.split(
scaled_model_input.shape[2] ,dim=2) # batch_size, num_embeddings, embedding_dim
if do_classifier_free_guidance is not None:
lowercase , lowercase = noise_pred.chunk(2)
lowercase = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
lowercase = self.scheduler.step(
A__ ,timestep=A__ ,sample=A__ ,).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=A__)
lowercase = []
for i, latent in enumerate(A__):
print()
lowercase = self.renderer.decode(
latent[None, :] ,A__ ,size=A__ ,ray_batch_size=4_0_9_6 ,n_coarse_samples=6_4 ,n_fine_samples=1_2_8 ,)
images.append(A__)
lowercase = torch.stack(A__)
if output_type not in ["np", "pil"]:
raise ValueError(f'Only the output types `pil` and `np` are supported not output_type={output_type}')
lowercase = images.cpu().numpy()
if output_type == "pil":
lowercase = [self.numpy_to_pil(A__) for image in images]
# Offload last model to CPU
if hasattr(self ,'''final_offload_hook''') and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=A__)
| 633 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowercase ( SCREAMING_SNAKE_CASE__ ):
    """Test suite for UniPCMultistepScheduler (built on a common scheduler test
    base class that supplies dummy_sample, dummy_model, check_over_configs, ...).

    NOTE(review): local names were mangled to ``lowercase``/``A__`` while later
    statements read the original identifiers (``config``, ``scheduler``,
    ``output``, ...); code is kept byte-identical, only comments were added.
    """
    # scheduler classes under test and default forward kwargs
    lowercase_ : List[str] =(UniPCMultistepScheduler,)
    lowercase_ : Tuple =(('''num_inference_steps''', 25),)
    def A__ ( self ,**A__):
        # Default scheduler config; callers override entries via kwargs.
        lowercase = {
            '''num_train_timesteps''': 1_0_0_0,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''solver_order''': 2,
            '''solver_type''': '''bh2''',
        }
        config.update(**A__)
        return config
    def A__ ( self ,A__=0 ,**A__):
        # Save/restore round-trip: a scheduler restored via save_config /
        # from_pretrained must step identically to the original.
        lowercase = dict(self.forward_default_kwargs)
        lowercase = kwargs.pop('''num_inference_steps''' ,A__)
        lowercase = self.dummy_sample
        lowercase = 0.1 * sample
        lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            lowercase = self.get_scheduler_config(**A__)
            lowercase = scheduler_class(**A__)
            scheduler.set_timesteps(A__)
            # copy over dummy past residuals
            lowercase = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(A__)
                lowercase = scheduler_class.from_pretrained(A__)
                new_scheduler.set_timesteps(A__)
                # copy over dummy past residuals
                lowercase = dummy_past_residuals[: new_scheduler.config.solver_order]
            lowercase , lowercase = sample, sample
            for t in range(A__ ,time_step + scheduler.config.solver_order + 1):
                lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
                lowercase = new_scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
                assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
    def A__ ( self ,A__=0 ,**A__):
        # Same round-trip check, but with the default config and a single step.
        lowercase = dict(self.forward_default_kwargs)
        lowercase = kwargs.pop('''num_inference_steps''' ,A__)
        lowercase = self.dummy_sample
        lowercase = 0.1 * sample
        lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            lowercase = self.get_scheduler_config()
            lowercase = scheduler_class(**A__)
            scheduler.set_timesteps(A__)
            # copy over dummy past residuals (must be after setting timesteps)
            lowercase = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(A__)
                lowercase = scheduler_class.from_pretrained(A__)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(A__)
                # copy over dummy past residual (must be after setting timesteps)
                lowercase = dummy_past_residuals[: new_scheduler.config.solver_order]
            lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
            lowercase = new_scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
    def A__ ( self ,A__=None ,**A__):
        # full_loop: run a complete 10-step denoising loop with a dummy model
        # and return the final sample.
        # NOTE(review): the three lines after the `if` unconditionally rebuild
        # the scheduler, shadowing the one built inside the `if` (and any
        # passed-in scheduler's config is only used for construction) — this
        # mirrors the mangled source; confirm intent upstream.
        if scheduler is None:
            lowercase = self.scheduler_classes[0]
            lowercase = self.get_scheduler_config(**A__)
            lowercase = scheduler_class(**A__)
        lowercase = self.scheduler_classes[0]
        lowercase = self.get_scheduler_config(**A__)
        lowercase = scheduler_class(**A__)
        lowercase = 1_0
        lowercase = self.dummy_model()
        lowercase = self.dummy_sample_deter
        scheduler.set_timesteps(A__)
        for i, t in enumerate(scheduler.timesteps):
            lowercase = model(A__ ,A__)
            lowercase = scheduler.step(A__ ,A__ ,A__).prev_sample
        return sample
    def A__ ( self):
        # Two consecutive steps must both produce outputs shaped like the input.
        lowercase = dict(self.forward_default_kwargs)
        lowercase = kwargs.pop('''num_inference_steps''' ,A__)
        for scheduler_class in self.scheduler_classes:
            lowercase = self.get_scheduler_config()
            lowercase = scheduler_class(**A__)
            lowercase = self.dummy_sample
            lowercase = 0.1 * sample
            if num_inference_steps is not None and hasattr(A__ ,'''set_timesteps'''):
                scheduler.set_timesteps(A__)
            elif num_inference_steps is not None and not hasattr(A__ ,'''set_timesteps'''):
                lowercase = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
            lowercase = dummy_past_residuals[: scheduler.config.solver_order]
            lowercase = scheduler.timesteps[5]
            lowercase = scheduler.timesteps[6]
            lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
            lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
            self.assertEqual(output_a.shape ,sample.shape)
            self.assertEqual(output_a.shape ,output_a.shape)
    def A__ ( self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        lowercase = UniPCMultistepScheduler(**self.get_scheduler_config())
        lowercase = self.full_loop(scheduler=A__)
        lowercase = torch.mean(torch.abs(A__))
        assert abs(result_mean.item() - 0.2464) < 1E-3
        # round-trip through sibling multistep schedulers' configs and back
        lowercase = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        lowercase = DEISMultistepScheduler.from_config(scheduler.config)
        lowercase = DPMSolverMultistepScheduler.from_config(scheduler.config)
        lowercase = UniPCMultistepScheduler.from_config(scheduler.config)
        lowercase = self.full_loop(scheduler=A__)
        lowercase = torch.mean(torch.abs(A__))
        assert abs(result_mean.item() - 0.2464) < 1E-3
    def A__ ( self):
        # Sweep num_train_timesteps values.
        for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=A__)
    def A__ ( self):
        # Sweep thresholding / solver / prediction-type combinations.
        # NOTE(review): the bare `A__` values here look like mangled booleans.
        self.check_over_configs(thresholding=A__)
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=A__ ,prediction_type=A__ ,sample_max_value=A__ ,solver_order=A__ ,solver_type=A__ ,)
    def A__ ( self):
        # Sweep prediction types.
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=A__)
    def A__ ( self):
        # Sweep solver type/order/prediction type and assert no NaNs in the
        # final full-loop sample for each combination.
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=A__ ,solver_type=A__ ,prediction_type=A__ ,)
                    lowercase = self.full_loop(
                        solver_order=A__ ,solver_type=A__ ,prediction_type=A__ ,)
                    assert not torch.isnan(A__).any(), "Samples have nan numbers"
    def A__ ( self):
        # lower_order_final on and off (booleans mangled to A__).
        self.check_over_configs(lower_order_final=A__)
        self.check_over_configs(lower_order_final=A__)
    def A__ ( self):
        # Sweep inference-step counts at a fixed time step.
        for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
            self.check_over_forward(num_inference_steps=A__ ,time_step=0)
    def A__ ( self):
        # Regression value for the default (epsilon) full loop.
        lowercase = self.full_loop()
        lowercase = torch.mean(torch.abs(A__))
        assert abs(result_mean.item() - 0.2464) < 1E-3
    def A__ ( self):
        # Regression value for v_prediction.
        lowercase = self.full_loop(prediction_type='''v_prediction''')
        lowercase = torch.mean(torch.abs(A__))
        assert abs(result_mean.item() - 0.1014) < 1E-3
    def A__ ( self):
        # Half-precision loop keeps its dtype end-to-end.
        # NOTE(review): `torch.floataa` is a mangled dtype name (presumably
        # torch.float16, matching `.half()` above) — confirm upstream.
        lowercase = self.scheduler_classes[0]
        lowercase = self.get_scheduler_config(thresholding=A__ ,dynamic_thresholding_ratio=0)
        lowercase = scheduler_class(**A__)
        lowercase = 1_0
        lowercase = self.dummy_model()
        lowercase = self.dummy_sample_deter.half()
        scheduler.set_timesteps(A__)
        for i, t in enumerate(scheduler.timesteps):
            lowercase = model(A__ ,A__)
            lowercase = scheduler.step(A__ ,A__ ,A__).prev_sample
        assert sample.dtype == torch.floataa
    def A__ ( self ,**A__):
        # Requesting as many inference steps as training steps must yield
        # all-unique timesteps.
        for scheduler_class in self.scheduler_classes:
            lowercase = self.get_scheduler_config(**A__)
            lowercase = scheduler_class(**A__)
            scheduler.set_timesteps(scheduler.config.num_train_timesteps)
            assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
| 633 | 1 |
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
lowercase__ :int = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
# NLLB language-code token ids used by the integration tests below
# (referenced there as EN_CODE / RO_CODE — presumably eng_Latn and ron_Latn;
# names were mangled, confirm against the upstream test file).
lowercase__ :Dict = 25_6047
lowercase__ :int = 25_6145
@require_sentencepiece
@require_tokenizers
class lowercase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
    """Unit tests for the NLLB tokenizer (slow + fast), built on the common
    TokenizerTesterMixin.

    NOTE(review): local names were mangled to ``lowercase``/``A__`` while later
    statements read the original identifiers; code is byte-identical, only
    comments were added.
    """
    # tokenizer classes and mixin flags used by TokenizerTesterMixin
    lowercase_ : Tuple =NllbTokenizer
    lowercase_ : Optional[int] =NllbTokenizerFast
    lowercase_ : Optional[int] =True
    lowercase_ : Dict =True
    lowercase_ : Optional[int] ={}
    def A__ ( self):
        # setUp: build a tokenizer from the SentencePiece fixture and save it.
        super().setUp()
        # We have a SentencePiece fixture for testing
        lowercase = NllbTokenizer(A__ ,keep_accents=A__)
        tokenizer.save_pretrained(self.tmpdirname)
    def A__ ( self):
        # End-to-end tokenize / ids / back-to-tokens checks on the fixture.
        lowercase = NllbTokenizer(A__ ,keep_accents=A__)
        lowercase = tokenizer.tokenize('''This is a test''')
        self.assertListEqual(A__ ,['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(A__) ,[value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] ,)
        lowercase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
        self.assertListEqual(
            A__ ,[
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''9''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''é''',
                '''.''',
            ] ,)
        lowercase = tokenizer.convert_tokens_to_ids(A__)
        self.assertListEqual(
            A__ ,[
                value + tokenizer.fairseq_offset
                for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
            ] ,)
        lowercase = tokenizer.convert_ids_to_tokens(A__)
        # out-of-vocab pieces come back as <unk>
        self.assertListEqual(
            A__ ,[
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''<unk>''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''<unk>''',
                '''.''',
            ] ,)
    def A__ ( self):
        # save_pretrained parity between the Rust (fast) and Python (slow)
        # tokenizers, under default / legacy_format=True / legacy_format=False.
        lowercase = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-nllb''', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                lowercase = self.rust_tokenizer_class.from_pretrained(A__ ,**A__)
                lowercase = self.tokenizer_class.from_pretrained(A__ ,**A__)
                lowercase = tempfile.mkdtemp()
                lowercase = tokenizer_r.save_pretrained(A__)
                lowercase = tokenizer_p.save_pretrained(A__)
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files))
                lowercase = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f)
                self.assertSequenceEqual(A__ ,A__)
                # Checks everything loads correctly in the same way
                lowercase = tokenizer_r.from_pretrained(A__)
                lowercase = tokenizer_p.from_pretrained(A__)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(A__ ,A__))
                shutil.rmtree(A__)
                # Save tokenizer rust, legacy_format=True
                lowercase = tempfile.mkdtemp()
                lowercase = tokenizer_r.save_pretrained(A__ ,legacy_format=A__)
                lowercase = tokenizer_p.save_pretrained(A__)
                # Checks it save with the same files
                self.assertSequenceEqual(A__ ,A__)
                # Checks everything loads correctly in the same way
                lowercase = tokenizer_r.from_pretrained(A__)
                lowercase = tokenizer_p.from_pretrained(A__)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(A__ ,A__))
                shutil.rmtree(A__)
                # Save tokenizer rust, legacy_format=False
                lowercase = tempfile.mkdtemp()
                lowercase = tokenizer_r.save_pretrained(A__ ,legacy_format=A__)
                lowercase = tokenizer_p.save_pretrained(A__)
                # Checks it saved the tokenizer.json file
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files))
                # Checks everything loads correctly in the same way
                lowercase = tokenizer_r.from_pretrained(A__)
                lowercase = tokenizer_p.from_pretrained(A__)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(A__ ,A__))
                shutil.rmtree(A__)
    @require_torch
    def A__ ( self):
        # prepare_seq2seq_batch: shapes of input_ids / labels respect
        # max_length and max_target_length (skips if unimplemented).
        if not self.test_seqaseq:
            return
        lowercase = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                # Longer text that will definitely require truncation.
                lowercase = [
                    ''' UN Chief Says There Is No Military Solution in Syria''',
                    ''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for'''
                    ''' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons'''
                    ''' will only worsen the violence and misery for millions of people.''',
                ]
                lowercase = [
                    '''Şeful ONU declară că nu există o soluţie militară în Siria''',
                    '''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al'''
                    ''' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'''
                    ''' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
                ]
                try:
                    lowercase = tokenizer.prepare_seqaseq_batch(
                        src_texts=A__ ,tgt_texts=A__ ,max_length=3 ,max_target_length=1_0 ,return_tensors='''pt''' ,src_lang='''eng_Latn''' ,tgt_lang='''ron_Latn''' ,)
                except NotImplementedError:
                    return
                self.assertEqual(batch.input_ids.shape[1] ,3)
                self.assertEqual(batch.labels.shape[1] ,1_0)
                # max_target_length will default to max_length if not specified
                lowercase = tokenizer.prepare_seqaseq_batch(
                    A__ ,tgt_texts=A__ ,max_length=3 ,return_tensors='''pt''')
                self.assertEqual(batch.input_ids.shape[1] ,3)
                self.assertEqual(batch.labels.shape[1] ,3)
                lowercase = tokenizer.prepare_seqaseq_batch(
                    src_texts=A__ ,max_length=3 ,max_target_length=1_0 ,return_tensors='''pt''')
                self.assertEqual(batch_encoder_only.input_ids.shape[1] ,3)
                self.assertEqual(batch_encoder_only.attention_mask.shape[1] ,3)
                self.assertNotIn('''decoder_input_ids''' ,A__)
    @unittest.skip('''Unfortunately way too slow to build a BPE with SentencePiece.''')
    def A__ ( self):
        pass
    def A__ ( self):
        # additional_special_tokens passed at init must be encodable by both
        # fast and slow tokenizers, with matching ids.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                lowercase = [AddedToken('''<special>''' ,lstrip=A__)]
                lowercase = self.rust_tokenizer_class.from_pretrained(
                    A__ ,additional_special_tokens=A__ ,**A__)
                lowercase = tokenizer_r.encode('''Hey this is a <special> token''')
                lowercase = tokenizer_r.encode('''<special>''' ,add_special_tokens=A__)[0]
                self.assertTrue(special_token_id in r_output)
                if self.test_slow_tokenizer:
                    lowercase = self.rust_tokenizer_class.from_pretrained(
                        A__ ,additional_special_tokens=A__ ,**A__ ,)
                    lowercase = self.tokenizer_class.from_pretrained(
                        A__ ,additional_special_tokens=A__ ,**A__)
                    lowercase = tokenizer_p.encode('''Hey this is a <special> token''')
                    lowercase = tokenizer_cr.encode('''Hey this is a <special> token''')
                    self.assertEqual(A__ ,A__)
                    self.assertEqual(A__ ,A__)
                    self.assertTrue(special_token_id in p_output)
                    self.assertTrue(special_token_id in cr_output)
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase ( unittest.TestCase ):
    """Integration tests against the real facebook/nllb-200-distilled-600M
    tokenizer: language-code ids, batch encoding, decoding, truncation,
    vocab save/round-trip, and seq2seq tensor building.

    NOTE(review): local names mangled to ``lowercase``/``A__``; code kept
    byte-identical, only comments added.
    """
    # checkpoint name, English source texts, Romanian targets, and the
    # expected token ids for the first source sentence (leading 256047 is the
    # eng_Latn language-code token).
    lowercase_ : int ='''facebook/nllb-200-distilled-600M'''
    lowercase_ : Optional[int] =[
        ''' UN Chief Says There Is No Military Solution in Syria''',
        ''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
    ]
    lowercase_ : str =[
        '''Şeful ONU declară că nu există o soluţie militară în Siria''',
        '''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
        ''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
        ''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
    ]
    lowercase_ : Tuple =[
        256047,
        16297,
        134408,
        8165,
        248066,
        14734,
        950,
        1135,
        105721,
        3573,
        83,
        27352,
        108,
        49486,
        2,
    ]
    @classmethod
    def A__ ( cls):
        # setUpClass: load the real tokenizer once for the whole suite.
        lowercase = NllbTokenizer.from_pretrained(
            cls.checkpoint_name ,src_lang='''eng_Latn''' ,tgt_lang='''ron_Latn''')
        lowercase = 1
        return cls
    def A__ ( self):
        # Known fairseq language-code ids.
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ace_Arab'''] ,2_5_6_0_0_1)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ace_Latn'''] ,2_5_6_0_0_2)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''fra_Latn'''] ,2_5_6_0_5_7)
    def A__ ( self):
        # First source sentence encodes to the expected id sequence.
        lowercase = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens ,A__)
    def A__ ( self):
        # Decoding with skip_special_tokens drops the language code and EOS.
        self.assertIn(A__ ,self.tokenizer.all_special_ids)
        # fmt: off
        lowercase = [RO_CODE, 4_2_5_4, 9_8_0_6_8, 1_1_2_9_2_3, 3_9_0_7_2, 3_9_0_9, 7_1_3, 1_0_2_7_6_7, 2_6, 1_7_3_1_4, 3_5_6_4_2, 1_4_6_8_3, 3_3_1_1_8, 2_0_2_2, 6_6_9_8_7, 2, 2_5_6_0_4_7]
        # fmt: on
        lowercase = self.tokenizer.decode(A__ ,skip_special_tokens=A__)
        lowercase = self.tokenizer.decode(generated_ids[1:] ,skip_special_tokens=A__)
        self.assertEqual(A__ ,A__)
        self.assertNotIn(self.tokenizer.eos_token ,A__)
    def A__ ( self):
        # Truncation keeps the language code first and EOS last.
        lowercase = ['''this is gunna be a long sentence ''' * 2_0]
        assert isinstance(src_text[0] ,A__)
        lowercase = 1_0
        lowercase = self.tokenizer(A__ ,max_length=A__ ,truncation=A__).input_ids[0]
        self.assertEqual(ids[-1] ,2)
        self.assertEqual(ids[0] ,A__)
        self.assertEqual(len(A__) ,A__)
    def A__ ( self):
        # Mask token and an unknown code map to the expected ids.
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR''']) ,[2_5_6_2_0_3, 3])
    def A__ ( self):
        # save_pretrained / from_pretrained round-trip preserves the fairseq
        # token-id mapping.
        lowercase = tempfile.mkdtemp()
        lowercase = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(A__)
        lowercase = NllbTokenizer.from_pretrained(A__)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids ,A__)
    @require_torch
    def A__ ( self):
        # Full src+tgt batch: shapes, shifted decoder inputs, and prefix/suffix
        # token bookkeeping.
        lowercase = self.tokenizer(
            self.src_text ,text_target=self.tgt_text ,padding=A__ ,truncation=A__ ,max_length=len(self.expected_src_tokens) ,return_tensors='''pt''' ,)
        lowercase = shift_tokens_right(
            batch['''labels'''] ,self.tokenizer.pad_token_id ,self.tokenizer.lang_code_to_id['''ron_Latn'''])
        self.assertIsInstance(A__ ,A__)
        self.assertEqual((2, 1_5) ,batch.input_ids.shape)
        self.assertEqual((2, 1_5) ,batch.attention_mask.shape)
        lowercase = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens ,A__)
        self.assertEqual(A__ ,batch.decoder_input_ids[0, 0]) # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens ,[EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id])
    def A__ ( self):
        # Independent source/target encodings with different max lengths.
        lowercase = self.tokenizer(self.src_text ,padding=A__ ,truncation=A__ ,max_length=3 ,return_tensors='''pt''')
        lowercase = self.tokenizer(
            text_target=self.tgt_text ,padding=A__ ,truncation=A__ ,max_length=1_0 ,return_tensors='''pt''')
        lowercase = targets['''input_ids''']
        lowercase = shift_tokens_right(
            A__ ,self.tokenizer.pad_token_id ,decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] ,)
        self.assertEqual(batch.input_ids.shape[1] ,3)
        self.assertEqual(batch.decoder_input_ids.shape[1] ,1_0)
    @require_torch
    def A__ ( self):
        # _build_translation_inputs returns ids/mask plus forced_bos_token_id
        # for the target language.
        lowercase = self.tokenizer._build_translation_inputs(
            '''A test''' ,return_tensors='''pt''' ,src_lang='''eng_Latn''' ,tgt_lang='''fra_Latn''')
        self.assertEqual(
            nested_simplify(A__) ,{
                # A, test, EOS, en_XX
                '''input_ids''': [[2_5_6_0_4_7, 7_0, 7_3_5_6, 2]],
                '''attention_mask''': [[1, 1, 1, 1]],
                # ar_AR
                '''forced_bos_token_id''': 2_5_6_0_5_7,
            } ,)
    @require_torch
    def A__ ( self):
        # Legacy behaviour toggles whether the language code is appended
        # (legacy) or prepended (new) to the id sequence.
        lowercase = True
        lowercase = self.tokenizer(
            '''UN Chief says there is no military solution in Syria''' ,src_lang='''eng_Latn''' ,tgt_lang='''fra_Latn''')
        self.assertEqual(
            inputs.input_ids ,[1_6_2_9_7, 1_3_4_4_0_8, 2_5_6_5_3, 6_3_7_0, 2_4_8, 2_5_4, 1_0_3_9_2_9, 9_4_9_9_5, 1_0_8, 4_9_4_8_6, 2, 2_5_6_0_4_7])
        lowercase = False
        lowercase = self.tokenizer(
            '''UN Chief says there is no military solution in Syria''' ,src_lang='''eng_Latn''' ,tgt_lang='''fra_Latn''')
        self.assertEqual(
            inputs.input_ids ,[2_5_6_0_4_7, 1_6_2_9_7, 1_3_4_4_0_8, 2_5_6_5_3, 6_3_7_0, 2_4_8, 2_5_4, 1_0_3_9_2_9, 9_4_9_9_5, 1_0_8, 4_9_4_8_6, 2])
import argparse
from collections import defaultdict
import yaml
lowercase__ :Optional[int] = "docs/source/en/_toctree.yml"
def UpperCamelCase ( lowerCAmelCase__ ):
    """Clean one model-doc section of the documentation table of contents.

    Deduplicates entries that share the same ``local`` page (raising
    ``ValueError`` if duplicates disagree on ``title``) and returns the
    entries sorted case-insensitively by title.

    Args:
        lowerCAmelCase__: list of ``{"local": ..., "title": ...}`` dicts.

    Returns:
        The cleaned, sorted list of doc entries.

    Raises:
        ValueError: if one ``local`` appears with more than one title.

    Fixes relative to the previous revision (which raised NameError at
    runtime): the counter is now ``defaultdict(int)`` instead of using the
    input list as the default factory; intermediate results are bound to the
    names the code actually reads; and the sort key lambda uses its own
    parameter instead of the undefined free name ``s``.
    """
    model_doc = lowerCAmelCase__
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort alphabetically by title, case-insensitively.
    return sorted(new_doc, key=lambda entry: entry["title"].lower())
def UpperCamelCase ( lowerCAmelCase__=False ):
    '''simple docstring'''
    # check_model_doc: load the TOC yaml, locate API -> Models, clean each
    # modality's section list, and either rewrite the file (when the flag is
    # truthy) or raise if anything was out of order.
    # NOTE(review): names are mangled — every binding targets `lowercase`
    # while later lines read `content`, `api_doc`, `diff`, etc., and the
    # parameter (a bool flag) is what gets passed to `open(...)` instead of
    # the TOC path constant; confirm against the upstream script.
    with open(lowerCAmelCase__ , encoding='''utf-8''' ) as f:
        lowercase = yaml.safe_load(f.read() )
    # Get to the API doc
    lowercase = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    lowercase = content[api_idx]['''sections''']
    # Then to the model doc
    lowercase = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    lowercase = api_doc[model_idx]['''sections''']
    # per-modality subsections (only entries that themselves have sections)
    lowercase = [(idx, section) for idx, section in enumerate(lowerCAmelCase__ ) if '''sections''' in section]
    lowercase = False
    for idx, modality_doc in modalities_docs:
        lowercase = modality_doc['''sections''']
        lowercase = clean_model_doc_toc(lowerCAmelCase__ )
        if old_modality_doc != new_modality_doc:
            lowercase = True
            if overwrite:
                # NOTE(review): presumably meant to write back into
                # modality_doc['sections'] — assignment target was mangled.
                lowercase = new_modality_doc
    if diff:
        if overwrite:
            lowercase = model_doc
            lowercase = api_doc
            with open(lowerCAmelCase__ , '''w''' , encoding='''utf-8''' ) as f:
                f.write(yaml.dump(lowerCAmelCase__ , allow_unicode=lowerCAmelCase__ ) )
        else:
            raise ValueError(
                '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
# CLI entry point: `--fix_and_overwrite` rewrites the TOC instead of raising.
# NOTE(review): bindings target `lowercase__` while later lines read `parser`,
# `args`, and `check_model_doc` (the functions above are both named
# `UpperCamelCase`) — names look mangled; confirm against the upstream script.
if __name__ == "__main__":
    lowercase__ :Any = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    lowercase__ :int = parser.parse_args()
    check_model_doc(args.fix_and_overwrite)
| 633 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Public API of the LayoutLMv3 sub-package.  Heavy modules are imported lazily
# through `_LazyModule` unless type checking is running.
# Fix: the structure dict was bound to a throwaway name (so `_import_structure`
# at the bottom was undefined), the conditional entries were discarded instead
# of added to it, and the lazy module was never installed in sys.modules.
_import_structure = {
    "configuration_layoutlmv3": [
        "LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LayoutLMv3Config",
        "LayoutLMv3OnnxConfig",
    ],
    "processing_layoutlmv3": ["LayoutLMv3Processor"],
    "tokenization_layoutlmv3": ["LayoutLMv3Tokenizer"],
}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv3"] = [
        "LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv3ForQuestionAnswering",
        "LayoutLMv3ForSequenceClassification",
        "LayoutLMv3ForTokenClassification",
        "LayoutLMv3Model",
        "LayoutLMv3PreTrainedModel",
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
        "TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLayoutLMv3ForQuestionAnswering",
        "TFLayoutLMv3ForSequenceClassification",
        "TFLayoutLMv3ForTokenClassification",
        "TFLayoutLMv3Model",
        "TFLayoutLMv3PreTrainedModel",
    ]
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]
if TYPE_CHECKING:
    # Module/class names below follow the declarations in `_import_structure`;
    # the originals were garbled ("layoutlmva") -- confirm against the package.
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 633 |
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func, a, precision=10**-10):
    """Find a root of ``func`` near the starting point ``a``.

    Fixes: the original `def` repeated the same parameter name three times
    (a SyntaxError) and was named ``UpperCamelCase`` while the calls below
    use ``newton_raphson``.

    Args:
        func (str): expression in the variable ``x``, e.g. ``"sin(x)"``.
        a: initial guess for the root.
        precision: stop once ``|func(x)| < precision``.

    Returns:
        float: an approximation of the root.
    """
    x = a
    while True:
        # Newton step: x <- x - f(x)/f'(x).  `eval` reads the local `x`;
        # the derivative comes symbolically from sympy.diff.
        x = Decimal(x) - (
            Decimal(eval(func) ) / Decimal(eval(str(diff(func) ) ) )  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func) ) < precision:  # noqa: S307
            return float(x)


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(F'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
    # Find root of polynomial
    print(F'The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}')
    # Find Square Root of 5
    print(F'The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}')
    # Exponential Roots
    print(F'The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}')
| 633 | 1 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
lowercase__ :Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowercase ( DiffusionPipeline ):
    """Speech-to-image pipeline: transcribe audio with Whisper, then run
    Stable Diffusion on the transcription.

    Fixes: the original base class was an undefined placeholder
    (`DiffusionPipeline` is the class imported above and provides
    `register_modules`/`progress_bar`/`numpy_to_pil`), every parameter was the
    same duplicated name (a SyntaxError), all methods were named ``A__``
    (shadowing each other), and locals were assigned to a throwaway name.
    """

    def __init__(
        self,
        speech_model,
        speech_processor,
        vae,
        text_encoder,
        tokenizer,
        unet,
        scheduler,
        safety_checker,
        feature_extractor,
    ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'
                ''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
                ''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
                ''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
                ''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
                ''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''')

        # NOTE(review): `safety_checker` is warned about but, as in the original
        # register_modules call, never registered -- confirm this is intended.
        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size="auto"):
        """Enable sliced attention computation to save memory."""
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        """Disable sliced attention (compute attention in one step)."""
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=1_6_0_0_0,
        height = 5_1_2,
        width = 5_1_2,
        num_inference_steps = 5_0,
        guidance_scale = 7.5,
        negative_prompt = None,
        num_images_per_prompt = 1,
        eta = 0.0,
        generator = None,
        latents = None,
        output_type = "pil",
        return_dict = True,
        callback = None,
        callback_steps = 1,
        **kwargs,
    ):
        """Transcribe ``audio`` and generate images for the transcription.

        Returns a `StableDiffusionPipelineOutput` (or a plain tuple of images
        when ``return_dict`` is False).
        """
        # Speech -> text prompt.
        input_features = self.speech_processor.feature_extractor(
            audio, return_tensors='''pt''', sampling_rate=sampling_rate).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(input_features, max_length=4_8_0_0_0_0)
        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}')

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.')

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
                f' {type(callback_steps)}.')

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding='''max_length''', max_length=self.tokenizer.model_max_length, return_tensors='''pt''',)
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                '''The following part of your input was truncated because CLIP can only handle sequences up to'''
                f' {self.tokenizer.model_max_length} tokens: {removed_text}')
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [''''''] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !='
                    f' {type(prompt)}.')
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:'
                    f' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'
                    ''' the batch size of `prompt`.''')
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens, padding='''max_length''', max_length=max_length, truncation=True, return_tensors='''pt''',)
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device='''cpu''', dtype=latents_dtype).to(
                    self.device)
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}')
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = '''eta''' in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs['''eta'''] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        # Decode latents back to image space (0.18215 is the SD VAE scaling factor).
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
| 633 |
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb):
    """Convert an (H, W, 3) RGB array to grayscale via the ITU-R 601 weights.

    Renamed from the obfuscated ``UpperCamelCase``: the `__main__` block below
    calls this function as ``rgb_to_gray``.
    """
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b
def gray_to_binary(gray):
    """Threshold a grayscale array to a boolean mask (values in (127, 255]).

    Renamed from the obfuscated ``UpperCamelCase`` to match the call in the
    `__main__` block below.
    """
    return (gray > 127) & (gray <= 255)
def dilation(image, kernel):
    """Morphological dilation of a binary ``image`` by ``kernel``.

    Fixes vs. the obfuscated original: the padded copy of the image and the
    per-pixel write into ``output`` were both assigned to throwaway names, so
    the function always returned zeros.

    Args:
        image: 2-D binary array.
        kernel: 2-D structuring element.

    Returns:
        np.ndarray: dilated binary image, same shape as ``image``.
    """
    output = np.zeros_like(image )
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
    # Copy image to padded image (centered for an odd-sized kernel)
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1] ):
        for y in range(image.shape[0] ):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            # Any overlap with the structuring element turns the pixel on.
            output[y, x] = int(summation > 0 )
    return output
if __name__ == "__main__":
    # Demo: load Lena, binarize, dilate with a cross-shaped structuring
    # element, and save the result.
    # NOTE(review): every result below is bound to the throwaway name
    # `lowercase__`, yet the later lines read `lena_path`, `lena`,
    # `structuring_element`, `output` and `pil_img` -- as written, this block
    # raises NameError at the second statement.  Confirm the intended names.
    # read original image
    lowercase__ :str = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lowercase__ :List[str] = np.array(Image.open(lena_path))
    # kernel to be applied
    lowercase__ :Union[str, Any] = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    lowercase__ :Optional[int] = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    lowercase__ :str = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
| 633 | 1 |
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase :
    """Builds tiny RegNet configs and random inputs for the TF model tests.

    NOTE(review): upstream names this class ``TFRegNetModelTester``; the name
    was mangled here, so ``setUp`` in the test suite below cannot resolve it.
    Fixes vs. the original: the duplicated ``A__`` parameter names were a
    SyntaxError, attributes were never assigned to ``self``, and all methods
    shared the name ``A__``.
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=3_2,
        num_channels=3,
        embeddings_size=1_0,
        hidden_sizes=[1_0, 2_0, 3_0, 4_0],  # noqa: B006 - read-only defaults kept for signature parity
        depths=[1, 1, 2, 1],  # noqa: B006
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) with random tensors."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFRegNetModel(config=config)
        result = model(pixel_values, training=False)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFRegNetForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_tf
class lowercase ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Model test suite for TF RegNet.

    Fixes vs. the obfuscated original: the base classes were an undefined
    placeholder (``TFModelTesterMixin``/``PipelineTesterMixin`` are the two
    mixins imported at the top of this file), the class attributes and all
    methods shared a single name (shadowing each other, so unittest could not
    discover them), and inner helper defs had duplicated parameter names
    (a SyntaxError).  Attribute/method names follow the values visible in the
    original bodies -- confirm against upstream ``TFRegNetModelTest``.
    """

    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )
    # RegNet has no attention, prunable heads, or text embeddings to resize.
    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        # NOTE(review): the tester class defined above lost its original name
        # (``TFRegNetModelTester``); this reference is unresolved as written.
        self.model_tester = TFRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason='''RegNet does not use inputs_embeds''')
    def test_inputs_embeds(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices('''GPU''')) == 0,
        reason='''TF does not support backprop for grouped convolutions on CPU.''',
    )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason='''RegNet does not support input and output embeddings''')
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            # One embedding output plus one output per stage.
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ['''basic''', '''bottleneck''']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict['''output_hidden_states'''] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):  # noqa: B006
            tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
            dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object, dict_object)),
                        msg=(
                            '''Tuple and dict output are not equal. Difference:'''
                            f' {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}'
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {'''output_hidden_states''': True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {'''output_hidden_states''': True})

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFRegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO test image used by the slow integration test.

    Renamed from the obfuscated ``UpperCamelCase`` (the integration test below
    calls ``prepare_img``); the original also bound the image to a throwaway
    name and returned the undefined ``image``.
    """
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_tf
@require_vision
class lowercase ( unittest.TestCase ):
    """Slow integration test: run the pretrained TF RegNet classifier on a
    real image and check the logits.

    Fixes vs. the obfuscated original: both methods were named ``A__`` (the
    second shadowed the first and unittest could not discover either) and the
    method locals were assigned to a throwaway name while read under their
    real names.
    """

    @cached_property
    def default_image_processor(self):
        # Name grounded by the `self.default_image_processor` read below.
        return (
            AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''tf''')

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1_0_0_0))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.4180, -1.5051, -3.4836])
        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1E-4)
| 633 |
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping keys from ``state_dict`` in place.

    Fixes vs. the original: the loop popped the dict itself (with itself as
    default) instead of each key, and the function was named ``UpperCamelCase``
    while the conversion routine below calls ``remove_ignore_keys_``.

    Args:
        state_dict (dict): mutated in place; missing keys are ignored.
    """
    ignore_keys = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''decoder.output_projection.weight''',
        '''_float_tensor''',
        '''encoder.embed_positions._float_tensor''',
        '''decoder.embed_positions._float_tensor''',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    """Build a bias-free ``nn.Linear`` sharing the weights of embedding ``emb``.

    Fixes vs. the original: ``bias=`` was passed the embedding module instead
    of ``False`` (so a truthy module enabled the bias), and the function was
    named ``UpperCamelCase`` while the conversion routine calls
    ``make_linear_from_emb``.

    Args:
        emb (nn.Embedding): source embedding.

    Returns:
        nn.Linear: layer whose weight tensor is the embedding's weight.
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def UpperCamelCase ( lowerCAmelCase__ ):
    """Convert a fairseq M2M100 checkpoint on disk to a HF
    ``MaMaaaForConditionalGeneration`` model.

    Fixes vs. the original: every local was assigned to a throwaway name while
    later lines read ``args``/``state_dict``/``model`` (NameError), the shared
    embedding weight was dropped instead of stored under ``shared.weight``,
    and the tied ``lm_head`` was never attached to the model.

    Args:
        lowerCAmelCase__ (str): path to the fairseq ``model.pt`` checkpoint.
    """
    mam_aaa = torch.load(lowerCAmelCase__ , map_location='''cpu''' )
    args = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model''']
    state_dict = mam_aaa['''model''']
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict['''encoder.embed_tokens.weight'''].shape[0]
    config = MaMaaaConfig(
        vocab_size=vocab_size , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , )
    # Decoder embeddings double as the shared embedding table.
    state_dict['''shared.weight'''] = state_dict['''decoder.embed_tokens.weight''']
    model = MaMaaaForConditionalGeneration(config )
    # strict=False: fairseq checkpoints carry extra keys we deliberately skip.
    model.model.load_state_dict(state_dict , strict=False )
    model.lm_head = make_linear_from_emb(model.model.shared )
    return model
if __name__ == "__main__":
    # Fixes: `parser`/`args` were bound to throwaway names (NameError), the
    # conversion entry point was called under a name that does not exist in
    # this file, and `args.fairseq_path` carried a stray `ß` character.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    # `UpperCamelCase` is this file's fairseq->HF conversion function.
    model = UpperCamelCase(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 633 | 1 |
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class lowercase :
    # Empty placeholder suite: the decorator marks it as requiring the
    # `onnxruntime` package, so collection skips it when that is missing.
    pass
| 633 |
from __future__ import annotations
from random import random
class lowercase :
    """Treap node: a value plus a random heap priority and two children.

    Fixes vs. the original: ``__init__`` and ``__str__`` assigned/read
    undefined locals, so construction raised NameError and attributes were
    never set.
    """

    def __init__( self ,A__ = None):
        self.value = A__
        # Random priority gives the treap its expected O(log n) balance.
        self.prior = random()
        self.left = None
        self.right = None

    def __repr__( self):
        from pprint import pformat

        if self.left is None and self.right is None:
            return f'\'{self.value}: {self.prior:.5}\''
        else:
            return pformat(
                {f'{self.value}: {self.prior:.5}': (self.left, self.right)} ,indent=1)

    def __str__( self):
        # Pre-order rendering: "value " followed by left then right subtree.
        value = str(self.value) + ''' '''
        left = str(self.left or '''''')
        right = str(self.right or '''''')
        return value + left + right
def split(root, value):
    """Split the treap at ``value`` into (left, right) subtrees.

    ``left`` holds keys < ``value``; ``right`` holds the rest.  Renamed from
    the obfuscated ``UpperCamelCase``: the body recurses under the name
    ``split``, and the duplicated parameter names were a SyntaxError.  The
    recursive results are re-attached to the child pointers, which the
    throwaway assignments in the original dropped.
    """
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right
def merge(left, right):
    """Merge two treaps where every key in ``left`` is < every key in ``right``.

    Renamed from the obfuscated ``UpperCamelCase`` (the body recurses under
    ``merge``); the recursive result is re-attached to the winner's child
    pointer, which the original's throwaway assignment dropped.
    """
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        # Lower priority wins the root slot (min-heap on `prior`).
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right
def insert(root, value):
    """Insert ``value`` into the treap rooted at ``root``; return the new root.

    Renamed from the obfuscated ``UpperCamelCase`` (``interact_treap`` calls
    ``insert``); the duplicated parameter names were a SyntaxError, and the
    original constructed an undefined ``Node`` -- this file's node class is
    named ``lowercase``.
    """
    node = lowercase(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)
def erase(root, value):
    """Remove every node with key ``value`` from the treap; return the new root.

    Renamed from the obfuscated ``UpperCamelCase`` (``interact_treap`` calls
    ``erase``); the duplicated parameter names were a SyntaxError.
    """
    # First cut keeps keys < value on the left ...
    left, right = split(root, value - 1)
    # ... second cut drops the nodes equal to value from the right part.
    _, right = split(right, value)
    return merge(left, right)
def inorder(root):
    """Print the treap's keys in sorted order, comma-separated, no newline.

    Renamed from the obfuscated ``UpperCamelCase``: the body recurses under
    the name ``inorder``.
    """
    if not root:  # None
        return
    else:
        inorder(root.left )
        print(root.value , end=''',''' )
        inorder(root.right )
def interact_treap(root, args):
    """Apply a whitespace-separated command string to the treap.

    ``+N`` inserts N, ``-N`` erases N, anything else prints a warning.
    Renamed from the obfuscated ``UpperCamelCase`` (``main`` calls
    ``interact_treap``); the duplicated parameter names were a SyntaxError.

    Returns:
        the new treap root after all commands.
    """
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:] ) )
        elif arg[0] == "-":
            root = erase(root, int(arg[1:] ) )
        else:
            print('''Unknown command''' )
    return root
def main():
    """Interactive REPL over the treap: read commands until 'q'.

    Renamed from the obfuscated ``UpperCamelCase`` -- the `__main__` guard
    below calls ``main()``.  The original also bound the root and the input
    line to throwaway names, so the loop read undefined variables.
    """
    root = None
    print(
        '''enter numbers to create a tree, + value to add value into treap, '''
        '''- value to erase all nodes with value. \'q\' to quit. ''' )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print('''good by!''' )
if __name__ == "__main__":
    # Run any doctests, then start the interactive treap session.
    # NOTE(review): `main` resolves only if the entry-point function above is
    # restored from its obfuscated name -- confirm.
    import doctest
    doctest.testmod()
    main()
| 633 | 1 |
def sylvester(number):
    """Return the ``number``-th term of Sylvester's sequence (1-indexed).

    The sequence satisfies s(1) = 2 and s(n) = s(n-1)^2 - s(n-1) + 1, computed
    here as (s-1)*s + 1.  Renamed from the obfuscated ``UpperCamelCase``: the
    body recurses (and the demo below calls) under the name ``sylvester``.

    Raises:
        ValueError: if ``number`` < 1.
    """
    assert isinstance(number, int ), f'The input value of [n={number}] is not an integer'

    if number == 1:
        return 2
    elif number < 1:
        msg = f'The input value of [n={number}] has to be > 0'
        raise ValueError(msg )
    else:
        num = sylvester(number - 1 )
        lower = num - 1
        upper = num
        return lower * upper + 1


if __name__ == "__main__":
    print(F'The 8th number in Sylvester\'s sequence: {sylvester(8)}')
| 633 |
def solution(n = 1000 ):
    """Return the maximum product a*b*c over Pythagorean triples with a+b+c = n.

    Project Euler problem 9 (generalized perimeter).  Renamed from the
    obfuscated ``UpperCamelCase`` -- the `__main__` guard below calls
    ``solution()``.

    Returns:
        int: the largest product, or -1 if no triple exists.
    """
    product = -1
    candidate = 0
    for a in range(1 , n // 3 ):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product


if __name__ == "__main__":
    print(F'{solution() = }')
| 633 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
lowercase_ : Union[str, Any] =KandinskyInpaintPipeline
lowercase_ : int =['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''']
lowercase_ : Dict =[
'''prompt''',
'''negative_prompt''',
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
'''mask_image''',
]
lowercase_ : Tuple =[
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''negative_prompt''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
lowercase_ : Optional[int] =False
@property
def A__ ( self):
return 3_2
@property
def A__ ( self):
return 3_2
@property
def A__ ( self):
return self.time_input_dim
@property
def A__ ( self):
return self.time_input_dim * 4
@property
def A__ ( self):
return 1_0_0
@property
def A__ ( self):
lowercase = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''')
return tokenizer
@property
def A__ ( self):
torch.manual_seed(0)
lowercase = MCLIPConfig(
numDims=self.cross_attention_dim ,transformerDimensions=self.text_embedder_hidden_size ,hidden_size=self.text_embedder_hidden_size ,intermediate_size=3_7 ,num_attention_heads=4 ,num_hidden_layers=5 ,vocab_size=1_0_0_5 ,)
lowercase = MultilingualCLIP(A__)
lowercase = text_encoder.eval()
return text_encoder
@property
def A__ ( self):
torch.manual_seed(0)
lowercase = {
'''in_channels''': 9,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
lowercase = UNetaDConditionModel(**A__)
return model
@property
def A__ ( self):
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def A__ ( self):
torch.manual_seed(0)
lowercase = VQModel(**self.dummy_movq_kwargs)
return model
def A__ ( self):
lowercase = self.dummy_text_encoder
lowercase = self.dummy_tokenizer
lowercase = self.dummy_unet
lowercase = self.dummy_movq
lowercase = DDIMScheduler(
num_train_timesteps=1_0_0_0 ,beta_schedule='''linear''' ,beta_start=0.00085 ,beta_end=0.012 ,clip_sample=A__ ,set_alpha_to_one=A__ ,steps_offset=1 ,prediction_type='''epsilon''' ,thresholding=A__ ,)
lowercase = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def A__ ( self ,A__ ,A__=0):
lowercase = floats_tensor((1, self.cross_attention_dim) ,rng=random.Random(A__)).to(A__)
lowercase = floats_tensor((1, self.cross_attention_dim) ,rng=random.Random(seed + 1)).to(A__)
# create init_image
lowercase = floats_tensor((1, 3, 6_4, 6_4) ,rng=random.Random(A__)).to(A__)
lowercase = image.cpu().permute(0 ,2 ,3 ,1)[0]
lowercase = Image.fromarray(np.uinta(A__)).convert('''RGB''').resize((2_5_6, 2_5_6))
# create mask
lowercase = np.ones((6_4, 6_4) ,dtype=np.floataa)
lowercase = 0
if str(A__).startswith('''mps'''):
lowercase = torch.manual_seed(A__)
else:
lowercase = torch.Generator(device=A__).manual_seed(A__)
lowercase = {
'''prompt''': '''horse''',
'''image''': init_image,
'''mask_image''': mask,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 6_4,
'''width''': 6_4,
'''num_inference_steps''': 2,
'''guidance_scale''': 4.0,
'''output_type''': '''np''',
}
return inputs
def A__ ( self):
lowercase = '''cpu'''
lowercase = self.get_dummy_components()
lowercase = self.pipeline_class(**A__)
lowercase = pipe.to(A__)
pipe.set_progress_bar_config(disable=A__)
lowercase = pipe(**self.get_dummy_inputs(A__))
lowercase = output.images
lowercase = pipe(
**self.get_dummy_inputs(A__) ,return_dict=A__ ,)[0]
lowercase = image[0, -3:, -3:, -1]
lowercase = image_from_tuple[0, -3:, -3:, -1]
print(f'image.shape {image.shape}')
assert image.shape == (1, 6_4, 6_4, 3)
lowercase = np.array(
[0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
    def A__ ( self):
        # Batched vs. single-sample inference accumulates small floating-point
        # differences in this pipeline, so use a looser tolerance than the default.
        super().test_inference_batch_single_identical(expected_max_diff=3E-3)
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
    """Slow integration test: Kandinsky 2.1 inpainting against a reference image.

    NOTE(review): method and local names had been obfuscated into undefined
    references; restored to the clear intent (unittest hooks, pipeline objects).
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        """Inpaint 'a hat' onto the reference cat image and compare mean pixel values."""
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy")
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png")
        mask = np.ones((768, 768), dtype=np.float32)
        # NOTE(review): the zeroed band matches the upstream test — confirm.
        mask[:250, 250:-250] = 0
        prompt = "a hat"
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16)
        # NOTE(review): ``torch_device`` is presumably imported at the top of the
        # original file (not visible in this chunk) — confirm.
        pipe_prior.to(torch_device)
        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="",
        ).to_tuple()
        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
| 633 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Lazy-import structure: submodule name -> public names it exports.
# NOTE(review): the original bound this dict and the torch-only model list to
# throwaway names, so the ``_LazyModule`` call below read an undefined
# ``_import_structure`` and the module was never replaced in sys.modules.
_import_structure = {
    "configuration_instructblip": [
        "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InstructBlipConfig",
        "InstructBlipQFormerConfig",
        "InstructBlipVisionConfig",
    ],
    "processing_instructblip": ["InstructBlipProcessor"],
}

# Modeling classes are only importable when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_instructblip"] = [
        "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InstructBlipQFormerModel",
        "InstructBlipPreTrainedModel",
        "InstructBlipForConditionalGeneration",
        "InstructBlipVisionModel",
    ]

if TYPE_CHECKING:
    # Direct imports so static type checkers see the real symbols.
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 633 | 1 |
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def UpperCamelCase ( xlm_checkpoint_path , pytorch_dump_folder_path ):
    """Convert an official XLM PyTorch dump into HF format (weights, config, vocab).

    Args:
        xlm_checkpoint_path: path to the original XLM ``.pth`` checkpoint.
        pytorch_dump_folder_path: output folder for the converted files.
    """
    # NOTE(review): the original collapsed every local into one throwaway name,
    # so the state-dict remapping loop, the config/vocab filters and the
    # torch.save/json.dumps calls all operated on undefined or wrong objects.
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    # Drop tensor-valued entries; only plain config values are serialized.
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    # BPE continuation markers become word-end markers in the HF vocab format.
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f'Save PyTorch model to {pytorch_weights_dump_path}')
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f'Save configuration file to {pytorch_config_dump_path}')
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f'Save vocab file to {pytorch_vocab_dump_path}')
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
if __name__ == "__main__":
    # CLI entry point: parse the two required paths and run the conversion.
    # NOTE(review): the original annotated these bindings with typing names that
    # are not imported in this script (NameError at runtime) and called a
    # function name that does not exist in this file; both fixed.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    UpperCamelCase(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 633 |
from numpy import exp, pi, sqrt
def UpperCamelCase ( x , mu = 0.0 , sigma = 1.0 ):
    """Return the Gaussian (normal) probability density evaluated at ``x``.

    Args:
        x: point at which to evaluate the density.
        mu: mean of the distribution (default 0.0).
        sigma: standard deviation of the distribution (default 1.0).
    """
    # NOTE(review): the original declared all three parameters under the same
    # name (a SyntaxError); restored to the conventional x / mu / sigma.
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 633 | 1 |
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
lowercase__ :Dict = logging.getLogger(__name__)
def UpperCamelCase ( folder_path ):
    """Write the current git repo's id / commit sha / branch to ``git_log.json``.

    Args:
        folder_path: directory in which ``git_log.json`` is created.
    """
    # NOTE(review): the original bound the repo to a throwaway name and then
    # read the undefined ``repo``, and it dumped the function argument instead
    # of the collected info dict; bindings restored.
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }
    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)
def UpperCamelCase ( params ):
    """Initialize GPU/distributed attributes on ``params`` (argparse-style namespace).

    Handles three cases: CPU-only (``n_gpu <= 0``), single local GPU, and
    multi-GPU/multi-node runs configured through the distributed env variables.
    """
    # NOTE(review): the original collapsed every ``params.<attr> = ...`` write
    # into one throwaway local, breaking the function and the sanity asserts
    # below; the attribute assignments are restored from the asserts/usages.
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info('Initializing GPUs')
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ['WORLD_SIZE'])
        params.n_gpu_per_node = int(os.environ['N_GPU_NODE'])
        params.global_rank = int(os.environ['RANK'])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ['N_NODES'])
        assert params.node_id == int(os.environ['NODE_RANK'])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f'--- Global rank: {params.global_rank} - '
    logger.info(PREFIX + 'Number of nodes: %i' % params.n_nodes)
    logger.info(PREFIX + 'Node ID        : %i' % params.node_id)
    logger.info(PREFIX + 'Local rank     : %i' % params.local_rank)
    logger.info(PREFIX + 'World size     : %i' % params.world_size)
    logger.info(PREFIX + 'GPUs per node  : %i' % params.n_gpu_per_node)
    logger.info(PREFIX + 'Master         : %s' % str(params.is_master))
    logger.info(PREFIX + 'Multi-node     : %s' % str(params.multi_node))
    logger.info(PREFIX + 'Multi-GPU      : %s' % str(params.multi_gpu))
    logger.info(PREFIX + 'Hostname       : %s' % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info('Initializing PyTorch distributed')
        torch.distributed.init_process_group(
            init_method='env://', backend='nccl',
        )
def UpperCamelCase ( args ):
    """Seed numpy and torch RNGs (and all CUDA devices when GPUs are used).

    Args:
        args: namespace with ``seed`` (int) and ``n_gpu`` (int) attributes.
    """
    # NOTE(review): the body read the undefined ``args`` while the parameter had
    # been renamed by obfuscation; the parameter is restored to ``args``.
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
| 633 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def UpperCamelCase ( key , default=False ):
    """Read boolean env flag ``key``, falling back to ``default`` when unset.

    Raises:
        ValueError: when the variable is set to a value strtobool rejects.
    """
    # NOTE(review): the original declared both parameters under the same name
    # (a SyntaxError) and returned an undefined ``_value``; restored.
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f'If set, {key} must be yes or no.')
    return _value
# NOTE(review): this flag is consumed by the slow-test decorator below as
# ``_run_slow_tests``, and the parser function above is named ``UpperCamelCase``
# in this file; the original bound the result to a throwaway name and called a
# nonexistent ``parse_flag_from_env``.
_run_slow_tests = UpperCamelCase("RUN_SLOW", default=False)
# --- unittest skip/require decorators --------------------------------------
# Each helper wraps ``unittest.skip``/``skipUnless`` so the decorated test only
# runs when the named capability is present.
# NOTE(review): all of these are (re)defined under the same obfuscated name
# ``UpperCamelCase``, so at module scope each definition shadows the previous
# one; only the last binding survives.

# Unconditionally skip the test.
def UpperCamelCase ( lowerCAmelCase__ ):
    '''simple docstring'''
    return unittest.skip('''Test was skipped''' )(lowerCAmelCase__ )


# Run only when RUN_SLOW-style slow tests are enabled.
def UpperCamelCase ( lowerCAmelCase__ ):
    '''simple docstring'''
    return unittest.skipUnless(_run_slow_tests , '''test is slow''' )(lowerCAmelCase__ )


# Run only on CPU-only machines.
def UpperCamelCase ( lowerCAmelCase__ ):
    '''simple docstring'''
    return unittest.skipUnless(not torch.cuda.is_available() , '''test requires only a CPU''' )(lowerCAmelCase__ )


# Run only when a CUDA GPU is available.
def UpperCamelCase ( lowerCAmelCase__ ):
    '''simple docstring'''
    return unittest.skipUnless(torch.cuda.is_available() , '''test requires a GPU''' )(lowerCAmelCase__ )


# Run only when an XPU device is available.
def UpperCamelCase ( lowerCAmelCase__ ):
    '''simple docstring'''
    return unittest.skipUnless(is_xpu_available() , '''test requires a XPU''' )(lowerCAmelCase__ )


# Run only when torch has MPS backend support.
def UpperCamelCase ( lowerCAmelCase__ ):
    '''simple docstring'''
    return unittest.skipUnless(is_mps_available() , '''test requires a `mps` backend support in `torch`''' )(lowerCAmelCase__ )


# Run only when both transformers and datasets are installed.
def UpperCamelCase ( lowerCAmelCase__ ):
    '''simple docstring'''
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available() , '''test requires the Hugging Face suite''' )(lowerCAmelCase__ )


# Run only when bitsandbytes is installed.
def UpperCamelCase ( lowerCAmelCase__ ):
    '''simple docstring'''
    return unittest.skipUnless(is_bnb_available() , '''test requires the bitsandbytes library''' )(lowerCAmelCase__ )


# Run only when a TPU is available.
def UpperCamelCase ( lowerCAmelCase__ ):
    '''simple docstring'''
    return unittest.skipUnless(is_tpu_available() , '''test requires TPU''' )(lowerCAmelCase__ )


# Run only with exactly one CUDA GPU.
def UpperCamelCase ( lowerCAmelCase__ ):
    '''simple docstring'''
    return unittest.skipUnless(torch.cuda.device_count() == 1 , '''test requires a GPU''' )(lowerCAmelCase__ )


# Run only with exactly one XPU device.
def UpperCamelCase ( lowerCAmelCase__ ):
    '''simple docstring'''
    return unittest.skipUnless(torch.xpu.device_count() == 1 , '''test requires a XPU''' )(lowerCAmelCase__ )


# Run only with multiple CUDA GPUs.
def UpperCamelCase ( lowerCAmelCase__ ):
    '''simple docstring'''
    return unittest.skipUnless(torch.cuda.device_count() > 1 , '''test requires multiple GPUs''' )(lowerCAmelCase__ )


# Run only with multiple XPU devices.
def UpperCamelCase ( lowerCAmelCase__ ):
    '''simple docstring'''
    return unittest.skipUnless(torch.xpu.device_count() > 1 , '''test requires multiple XPUs''' )(lowerCAmelCase__ )


# Run only when safetensors is installed.
def UpperCamelCase ( lowerCAmelCase__ ):
    '''simple docstring'''
    return unittest.skipUnless(is_safetensors_available() , '''test requires safetensors''' )(lowerCAmelCase__ )


# Run only when DeepSpeed is installed.
def UpperCamelCase ( lowerCAmelCase__ ):
    '''simple docstring'''
    return unittest.skipUnless(is_deepspeed_available() , '''test requires DeepSpeed''' )(lowerCAmelCase__ )


# Run only on torch >= 1.12.0.
def UpperCamelCase ( lowerCAmelCase__ ):
    '''simple docstring'''
    return unittest.skipUnless(is_torch_version('''>=''' , '''1.12.0''' ) , '''test requires torch version >= 1.12.0''' )(lowerCAmelCase__ )
def UpperCamelCase ( test_case=None , version=None ):
    """Decorator: skip the test unless the installed torch is >= ``version``.

    Supports parameterized use: when called with only ``version`` it returns a
    partial that is later applied to the test case.
    """
    # NOTE(review): the original declared both parameters under the same name
    # (a SyntaxError); restored to test_case / version.
    if test_case is None:
        return partial(UpperCamelCase, version=version)
    return unittest.skipUnless(is_torch_version('>=', version), f'test requires torch version >= {version}')(test_case)
# Run only when TensorBoard is installed.
def UpperCamelCase ( lowerCAmelCase__ ):
    '''simple docstring'''
    return unittest.skipUnless(is_tensorboard_available() , '''test requires Tensorboard''' )(lowerCAmelCase__ )


# Run only when Weights & Biases is installed.
def UpperCamelCase ( lowerCAmelCase__ ):
    '''simple docstring'''
    return unittest.skipUnless(is_wandb_available() , '''test requires wandb''' )(lowerCAmelCase__ )


# Run only when comet_ml is installed.
def UpperCamelCase ( lowerCAmelCase__ ):
    '''simple docstring'''
    return unittest.skipUnless(is_comet_ml_available() , '''test requires comet_ml''' )(lowerCAmelCase__ )
# True when at least one experiment tracker (wandb / tensorboard) is usable and
# comet_ml is absent (comet_ml hijacks tracking when installed).
# NOTE(review): the decorator below reads ``_atleast_one_tracker_available``;
# the original bound this expression to a throwaway name.
_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
# Run only when a usable tracker exists and comet_ml is not installed.
def UpperCamelCase ( lowerCAmelCase__ ):
    '''simple docstring'''
    return unittest.skipUnless(
        _atleast_one_tracker_available , '''test requires at least one tracker to be available and for `comet_ml` to not be installed''' , )(lowerCAmelCase__ )
class lowercase ( unittest.TestCase ):
    """TestCase with a class-scoped temporary directory.

    The directory is created once per class and removed afterwards; by default
    its contents are wiped before every test. Set ``clear_on_setup = False`` in
    a subclass to keep files across tests.
    """

    # NOTE(review): the original stored this flag under an obfuscated attribute
    # name while ``setUp`` read ``self.clear_on_setup``, and the unittest hook
    # methods had lost their special names so none of them ever ran; restored.
    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        # Create the shared scratch directory for the whole class.
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        # Empty the scratch directory before each test (files and subdirs).
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob('**/*'):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)
class lowercase ( unittest.TestCase ):
    """TestCase that resets the Accelerate singleton state after each test."""

    def tearDown(self):
        # NOTE(review): this is clearly the unittest tearDown hook (it calls
        # super().tearDown()), but the method name had been obfuscated away so
        # it was never invoked by the test runner; restored.
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class lowercase ( unittest.TestCase ):
    """TestCase helper that starts mock objects and stops them on cleanup."""

    def A__ ( self ,mocks):
        """Start each given mock (single mock or list/tuple) and schedule its stop.

        Args:
            mocks: one mock patcher or a list/tuple of them.
        """
        # NOTE(review): the original dropped the ``self.mocks`` binding and then
        # iterated over the undefined ``self.mocks``; restored.
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def UpperCamelCase ( tensor ):
    """Return True when ``tensor`` holds the same values on every distributed process."""
    # NOTE(review): every local binding had been collapsed into one throwaway
    # name, leaving ``state``/``tensors`` undefined; restored.
    state = AcceleratorState()
    # Add a leading dim so gather stacks one row per process.
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class lowercase :
    """Simple record of a finished subprocess: return code plus captured streams."""

    def __init__(self, returncode, stdout, stderr):
        # NOTE(review): the original declared all three parameters under the
        # same name (a SyntaxError) and assigned them backwards; restored.
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def UpperCamelCase ( stream , callback ):
    """Forward lines from an asyncio stream to ``callback`` until EOF.

    Args:
        stream: object with an awaitable ``readline()`` returning b'' at EOF.
        callback: called with each non-empty line (bytes).
    """
    # NOTE(review): both parameters shared one obfuscated name in the original
    # (a SyntaxError); restored to stream / callback.
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def UpperCamelCase ( cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False ):
    """Run ``cmd`` as a subprocess, teeing its stdout/stderr while capturing them.

    Returns a run-output record with the return code and captured streams.
    """
    # NOTE(review): the original declared six parameters under one name (a
    # SyntaxError) and dropped the ``p``/``out``/``err`` bindings; restored.
    # ``_read_stream`` / ``_RunOutput`` are referenced below as in the original,
    # but those names were obfuscated away in this file — confirm the bindings.
    if echo:
        print('\nRunning: ', ' '.join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env,
    )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        # Decode, strip trailing newline, record, and optionally echo.
        line = line.decode('utf-8').rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label='stdout:'))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label='stderr:'))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def UpperCamelCase ( cmd , env=None , stdin=None , timeout=180 , quiet=False , echo=True ):
    """Synchronously run ``cmd`` through the async streamer; raise on non-zero exit.

    Raises:
        RuntimeError: when the subprocess exits with a positive return code.
    """
    # NOTE(review): parameters shared one obfuscated name (a SyntaxError) and
    # the ``loop``/``result`` locals were dropped; restored.  ``_stream_subprocess``
    # is referenced as in the original but that name was obfuscated away in this
    # file — confirm the binding.
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))

    cmd_str = ' '.join(cmd)
    if result.returncode > 0:
        stderr = '\n'.join(result.stderr)
        raise RuntimeError(
            f'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
            f'The combined stderr from workers follows:\n{stderr}')
    return result
class lowercase ( Exception ):
    """Raised by the subprocess helper below when a command exits non-zero.

    NOTE(review): the original base class name (``SCREAMING_SNAKE_CASE__``)
    does not exist in this file; ``Exception`` restores a usable exception
    type matching how the class is raised below.
    """
    pass
def UpperCamelCase ( command , return_stdout=False ):
    """Run ``command`` (an argv list) via subprocess, optionally returning stdout.

    Args:
        command: argv list passed to subprocess.check_output.
        return_stdout: when True, return the (decoded) captured output.

    Raises:
        SubprocessCallException-style error when the command fails.
    """
    # NOTE(review): both parameters shared one obfuscated name in the original
    # (a SyntaxError); restored.  ``SubprocessCallException`` is referenced as
    # in the original, though that class name was obfuscated away in this
    # file — confirm the binding.
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, 'decode'):
                output = output.decode('utf-8')
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f'Command `{" ".join(command)}` failed with the following error:\n\n{e.output.decode()}') from e
| 633 | 1 |
def UpperCamelCase ( dist , v ):
    """Pretty-print the ``v`` x ``v`` distance matrix, using INF for unreachable pairs."""
    # NOTE(review): both parameters shared one obfuscated name in the original
    # (a SyntaxError); restored to dist / v.
    print('\nThe shortest path matrix using Floyd Warshall algorithm\n')
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float('inf'):
                print(int(dist[i][j]), end='\t')
            else:
                print('INF', end='\t')
        print()
def UpperCamelCase ( graph , v ):
    """Compute all-pairs shortest paths for a ``v`` x ``v`` adjacency matrix.

    Prints the resulting matrix and returns ``(dist, v)``.
    """
    # NOTE(review): parameters shared one obfuscated name (a SyntaxError) and
    # the ``dist`` matrix writes were dropped; restored.  ``_print_dist`` is
    # referenced as in the original but that name was obfuscated away in this
    # file — confirm the binding.
    dist = [[float('inf') for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float('inf')
                    and dist[k][j] != float('inf')
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v
if __name__ == "__main__":
    # Interactive driver: read the graph from stdin and run Floyd-Warshall.
    # NOTE(review): the original bound every input to throwaway names and then
    # read undefined ``v``/``e``/``graph``/``src``/``dst``/``weight``, and it
    # called a function name that does not exist in this file; restored.
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    UpperCamelCase(graph, v)
    # Example Input
    # Enter number of vertices: 3
    # Enter number of edges: 2
    # # generated graph from vertex and edge inputs
    # [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
    # [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
    # specify source, destination and weight for edge #1
    # Edge 1
    # Enter source:1
    # Enter destination:2
    # Enter weight:2
    # specify source, destination and weight for edge #2
    # Edge 2
    # Enter source:2
    # Enter destination:1
    # Enter weight:1
    # # Expected Output from the vertice, edge and src, dst, weight inputs!!
    # 0 INF INF
    # INF 0 2
    # INF 1 0
| 633 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowercase ( SeqaSeqTrainer ):
    """Seq2Seq trainer with post-processing of generated predictions into metrics.

    NOTE(review): the original base class name (``SCREAMING_SNAKE_CASE__``) does
    not exist in this file; ``SeqaSeqTrainer`` (imported above) is the clear
    intent.  Parameter and local names inside the methods had been obfuscated
    into undefined references and are restored here.
    """

    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        # Raw (pre-tokenization) eval examples and the hook that turns model
        # generations into metric-ready predictions.
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix="eval", **gen_kwargs):
        """Run generation-based evaluation and (optionally) post-processed metrics."""
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        # Stash generation kwargs for the prediction step.
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix="test", **gen_kwargs):
        """Run generation-based prediction; return PredictionOutput with metrics when possible."""
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
| 633 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class lowercase ( unittest.TestCase ):
@slow
def A__ ( self):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
lowercase = AutoConfig.from_pretrained(A__)
self.assertIsNotNone(A__)
self.assertIsInstance(A__ ,A__)
lowercase = TFAutoModel.from_pretrained(A__ ,from_pt=A__)
self.assertIsNotNone(A__)
self.assertIsInstance(A__ ,A__)
lowercase = AutoModel.from_pretrained(A__ ,from_tf=A__)
self.assertIsNotNone(A__)
self.assertIsInstance(A__ ,A__)
@slow
def A__ ( self):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
lowercase = AutoConfig.from_pretrained(A__)
self.assertIsNotNone(A__)
self.assertIsInstance(A__ ,A__)
lowercase = TFAutoModelForPreTraining.from_pretrained(A__ ,from_pt=A__)
self.assertIsNotNone(A__)
self.assertIsInstance(A__ ,A__)
lowercase = AutoModelForPreTraining.from_pretrained(A__ ,from_tf=A__)
self.assertIsNotNone(A__)
self.assertIsInstance(A__ ,A__)
@slow
def A__ ( self):
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = AutoConfig.from_pretrained(A__)
self.assertIsNotNone(A__)
self.assertIsInstance(A__ ,A__)
lowercase = TFAutoModelForCausalLM.from_pretrained(A__ ,from_pt=A__)
lowercase , lowercase = TFAutoModelForCausalLM.from_pretrained(
A__ ,output_loading_info=A__ ,from_pt=A__)
self.assertIsNotNone(A__)
self.assertIsInstance(A__ ,A__)
lowercase = AutoModelForCausalLM.from_pretrained(A__ ,from_tf=A__)
lowercase , lowercase = AutoModelForCausalLM.from_pretrained(
A__ ,output_loading_info=A__ ,from_tf=A__)
self.assertIsNotNone(A__)
self.assertIsInstance(A__ ,A__)
@slow
def A__ ( self):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = AutoConfig.from_pretrained(A__)
self.assertIsNotNone(A__)
self.assertIsInstance(A__ ,A__)
lowercase = TFAutoModelWithLMHead.from_pretrained(A__ ,from_pt=A__)
self.assertIsNotNone(A__)
self.assertIsInstance(A__ ,A__)
lowercase = AutoModelWithLMHead.from_pretrained(A__ ,from_tf=A__)
self.assertIsNotNone(A__)
self.assertIsInstance(A__ ,A__)
@slow
def A__ ( self):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = AutoConfig.from_pretrained(A__)
self.assertIsNotNone(A__)
self.assertIsInstance(A__ ,A__)
lowercase = TFAutoModelForMaskedLM.from_pretrained(A__ ,from_pt=A__)
lowercase , lowercase = TFAutoModelForMaskedLM.from_pretrained(
A__ ,output_loading_info=A__ ,from_pt=A__)
self.assertIsNotNone(A__)
self.assertIsInstance(A__ ,A__)
lowercase = AutoModelForMaskedLM.from_pretrained(A__ ,from_tf=A__)
lowercase , lowercase = AutoModelForMaskedLM.from_pretrained(
A__ ,output_loading_info=A__ ,from_tf=A__)
self.assertIsNotNone(A__)
self.assertIsInstance(A__ ,A__)
@slow
def A__ ( self):
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = AutoConfig.from_pretrained(A__)
self.assertIsNotNone(A__)
self.assertIsInstance(A__ ,A__)
lowercase = TFAutoModelForSeqaSeqLM.from_pretrained(A__ ,from_pt=A__)
lowercase , lowercase = TFAutoModelForSeqaSeqLM.from_pretrained(
A__ ,output_loading_info=A__ ,from_pt=A__)
self.assertIsNotNone(A__)
self.assertIsInstance(A__ ,A__)
lowercase = AutoModelForSeqaSeqLM.from_pretrained(A__ ,from_tf=A__)
lowercase , lowercase = AutoModelForSeqaSeqLM.from_pretrained(
A__ ,output_loading_info=A__ ,from_tf=A__)
self.assertIsNotNone(A__)
self.assertIsInstance(A__ ,A__)
@slow
def A__ ( self):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
lowercase = AutoConfig.from_pretrained(A__)
self.assertIsNotNone(A__)
self.assertIsInstance(A__ ,A__)
lowercase = TFAutoModelForSequenceClassification.from_pretrained(A__ ,from_pt=A__)
self.assertIsNotNone(A__)
self.assertIsInstance(A__ ,A__)
lowercase = AutoModelForSequenceClassification.from_pretrained(A__ ,from_tf=A__)
self.assertIsNotNone(A__)
self.assertIsInstance(A__ ,A__)
@slow
def A__ ( self):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
lowercase = AutoConfig.from_pretrained(A__)
self.assertIsNotNone(A__)
self.assertIsInstance(A__ ,A__)
lowercase = TFAutoModelForQuestionAnswering.from_pretrained(A__ ,from_pt=A__)
self.assertIsNotNone(A__)
self.assertIsInstance(A__ ,A__)
lowercase = AutoModelForQuestionAnswering.from_pretrained(A__ ,from_tf=A__)
self.assertIsNotNone(A__)
self.assertIsInstance(A__ ,A__)
    def A__ ( self):
        # Loads a tiny LM-head checkpoint across TF<->PT and checks its exact
        # parameter count (14410) with and without only_trainable.
        # NOTE(review): ``A__`` is undefined here (it was a checkpoint id string)
        # and the loaded model is bound to ``lowercase`` while the asserts read
        # ``model`` — the original identifiers were mangled; restore before use.
        lowercase = TFAutoModelWithLMHead.from_pretrained(A__ ,from_pt=A__)
        self.assertIsInstance(A__ ,A__)
        self.assertEqual(model.num_parameters() ,1_4_4_1_0)
        self.assertEqual(model.num_parameters(only_trainable=A__) ,1_4_4_1_0)
        lowercase = AutoModelWithLMHead.from_pretrained(A__ ,from_tf=A__)
        self.assertIsInstance(A__ ,A__)
        self.assertEqual(model.num_parameters() ,1_4_4_1_0)
        self.assertEqual(model.num_parameters(only_trainable=A__) ,1_4_4_1_0)
    def A__ ( self):
        # Same parameter-count round-trip as the method above, presumably for a
        # second tiny checkpoint (the id was lost to the mangling — verify).
        # NOTE(review): ``A__`` is undefined and ``model`` is never bound; the
        # original identifiers were mangled; restore before use.
        lowercase = TFAutoModelWithLMHead.from_pretrained(A__ ,from_pt=A__)
        self.assertIsInstance(A__ ,A__)
        self.assertEqual(model.num_parameters() ,1_4_4_1_0)
        self.assertEqual(model.num_parameters(only_trainable=A__) ,1_4_4_1_0)
        lowercase = AutoModelWithLMHead.from_pretrained(A__ ,from_tf=A__)
        self.assertIsInstance(A__ ,A__)
        self.assertEqual(model.num_parameters() ,1_4_4_1_0)
        self.assertEqual(model.num_parameters(only_trainable=A__) ,1_4_4_1_0)
| 633 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
lowercase__ :Any = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def UpperCamelCase ( lowerCAmelCase__=None ):
    '''Build the argparse parser for the `accelerate tpu-config` command.

    lowerCAmelCase__: optional argparse subparsers action. When given, the
    command is registered on it and a ``func`` default is attached; otherwise a
    standalone parser is returned.

    BUG FIX: the body referenced the undefined names ``subparsers``, ``parser``,
    ``config_args`` and ``pod_args`` (their bindings had been clobbered into
    ``lowercase = ...``), and passed the subparsers object itself as
    ``type=``/``default=`` of several arguments. Real locals and the
    conventional ``type=str, default=None`` are restored.
    '''
    if lowerCAmelCase__ is not None:
        parser = lowerCAmelCase__.add_parser('''tpu-config''' , description=_description )
    else:
        parser = argparse.ArgumentParser('''Accelerate tpu-config command''' , description=_description )
    # Core arguments
    config_args = parser.add_argument_group(
        '''Config Arguments''' , '''Arguments that can be configured through `accelerate config`.''' )
    config_args.add_argument(
        '''--config_file''' , type=str , default=None , help='''Path to the config file to use for accelerate.''' , )
    config_args.add_argument(
        '''--tpu_name''' , default=None , help='''The name of the TPU to use. If not specified, will use the TPU specified in the config file.''' , )
    config_args.add_argument(
        '''--tpu_zone''' , default=None , help='''The zone of the TPU to use. If not specified, will use the zone specified in the config file.''' , )
    pod_args = parser.add_argument_group('''TPU Arguments''' , '''Arguments for options ran inside the TPU.''' )
    pod_args.add_argument(
        '''--use_alpha''' , action='''store_true''' , help='''Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.''' , )
    pod_args.add_argument(
        '''--command_file''' , default=None , help='''The path to the file containing the commands to run on the pod on startup.''' , )
    pod_args.add_argument(
        '''--command''' , action='''append''' , nargs='''+''' , help='''A command to run on the pod. Can be passed multiple times.''' , )
    pod_args.add_argument(
        '''--install_accelerate''' , action='''store_true''' , help='''Whether to install accelerate on the pod. Defaults to False.''' , )
    pod_args.add_argument(
        '''--accelerate_version''' , default='''latest''' , help='''The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.''' , )
    pod_args.add_argument(
        '''--debug''' , action='''store_true''' , help='''If set, will print the command that would be run instead of running it.''' )
    if lowerCAmelCase__ is not None:
        # NOTE(review): upstream accelerate wires the launcher here; in this file
        # the launcher def was renamed, so confirm the name resolves at call time.
        parser.set_defaults(func=tpu_command_launcher )
    return parser
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Run the `accelerate tpu-config` command: merge the CLI namespace with the
    saved accelerate config, assemble the pod startup command, and ship it to
    the TPU via ``gcloud compute tpus tpu-vm ssh``.

    NOTE(review): throughout this body the original ``args.<field> = ...``
    assignments were clobbered into ``lowercase = ...`` locals, so every merged
    value is silently discarded, and the names ``args``, ``defaults``,
    ``new_cmd`` and ``cmd`` are read without being bound. The function cannot
    run as written — restore the upstream identifiers before use.
    '''
    lowercase = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(lowerCAmelCase__ ):
        lowercase = load_config_from_file(args.config_file )
        if not args.command_file and defaults.command_file is not None and not args.command:
            lowercase = defaults.command_file
        if not args.command and defaults.commands is not None:
            lowercase = defaults.commands
        if not args.tpu_name:
            lowercase = defaults.tpu_name
        if not args.tpu_zone:
            lowercase = defaults.tpu_zone
    # Translate the version selector into a pip requirement spec.
    if args.accelerate_version == "dev":
        lowercase = '''git+https://github.com/huggingface/accelerate.git'''
    elif args.accelerate_version == "latest":
        lowercase = '''accelerate -U'''
    elif isinstance(parse(args.accelerate_version ) , lowerCAmelCase__ ):
        lowercase = f'accelerate=={args.accelerate_version}'
    if not args.command_file and not args.command:
        raise ValueError('''You must specify either a command file or a command to run on the pod.''' )
    if args.command_file:
        with open(args.command_file , '''r''' ) as f:
            lowercase = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0] , lowerCAmelCase__ ):
        lowercase = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    lowercase = ['''cd /usr/share''']
    if args.install_accelerate:
        new_cmd += [f'pip install {args.accelerate_version}']
    new_cmd += args.command
    # The pod runs everything as one semicolon-joined shell command.
    lowercase = '''; '''.join(lowerCAmelCase__ )
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    lowercase = ['''gcloud''']
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        # Dry run: print the gcloud invocation instead of executing it.
        print(f'Running {" ".join(lowerCAmelCase__ )}' )
        return
    subprocess.run(lowerCAmelCase__ )
    print('''Successfully setup pod.''' )
def UpperCamelCase ( ):
    '''Standalone entry point: build the parser, parse argv, run the launcher.

    NOTE(review): ``tpu_command_parser``/``tpu_command_launcher`` are not
    defined under those names in this file (the defs above were renamed to
    ``UpperCamelCase``), ``parser`` is read without being bound, and the parsed
    namespace is never passed on — restore the original identifiers.
    '''
    lowercase = tpu_command_parser()
    lowercase = parser.parse_args()
    tpu_command_launcher(lowerCAmelCase__ )
| 633 | 1 |
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable

# Public submodule -> exported-name map, consumed lazily by _LazyModule below.
# BUG FIX: this dict (and the modeling entry added in the `else:` branch) had
# been bound to throwaway names, so `_import_structure` was undefined where used
# and the lazy module was never installed into sys.modules.
_import_structure = {
    "configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
    "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling code is only exposed when torch is installed.
    _import_structure["modeling_gpt_neox_japanese"] = [
        "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXJapaneseForCausalLM",
        "GPTNeoXJapaneseLayer",
        "GPTNeoXJapaneseModel",
        "GPTNeoXJapanesePreTrainedModel",
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
    from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox_japanese import (
            GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXJapaneseForCausalLM,
            GPTNeoXJapaneseLayer,
            GPTNeoXJapaneseModel,
            GPTNeoXJapanesePreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 633 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
lowercase__ :Optional[Any] = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
lowercase__ :List[str] = 10
lowercase__ :Tuple = 256
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Return a datasketch ``MinHash`` over the set of tokens in
    ``lowerCAmelCase__``, or ``None`` when there are fewer than
    ``MIN_NUM_TOKENS`` tokens.

    BUG FIX: the MinHash instance was bound to a throwaway local, so
    ``min_hash.update``/``return min_hash`` read an undefined name, and the
    token list itself was passed as ``num_perm`` instead of the module constant.
    NOTE(review): ``MIN_NUM_TOKENS``/``NUM_PERM`` are still bound to mangled
    names at module level in this file — confirm the wiring.
    '''
    if len(lowerCAmelCase__ ) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM )
    for token in set(lowerCAmelCase__ ):
        min_hash.update(token.encode() )
    return min_hash
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Split the string on non-identifier characters (module regex
    ``NON_ALPHA``) and return the set of tokens that are not blank.'''
    pieces = NON_ALPHA.split(lowerCAmelCase__ )
    return {piece for piece in pieces if piece.strip()}
class lowercase :
    '''Incremental MinHash-LSH index that groups near-duplicate files into
    clusters keyed by the first file seen in each cluster.

    NOTE(review): the constructor binds everything to a throwaway local, so
    ``self._index``/``self._duplicate_clusters`` are never created, and the
    ``add`` method reads ``code_key``/``close_duplicates`` which are unbound —
    the original attribute/parameter names were mangled; restore before use.
    '''
    def __init__( self ,*,
    A__ = 0.85 ,):
        # Intended attributes: jaccard threshold, permutation count, the LSH
        # index and a dict mapping cluster base key -> set of duplicate keys.
        lowercase = duplication_jaccard_threshold
        lowercase = NUM_PERM
        lowercase = MinHashLSH(threshold=self._duplication_jaccard_threshold ,num_perm=self._num_perm)
        lowercase = defaultdict(A__)
    def A__ ( self ,A__ ,A__):
        # add(code_key, min_hash): query near-duplicates, then either join an
        # existing cluster or start a new one rooted at the first close match.
        lowercase = self._index.query(A__)
        if code_key in self._index.keys:
            print(f'Duplicate key {code_key}')
            return
        self._index.insert(A__ ,A__)
        if len(A__) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(A__)
                    break
            else:
                # No existing cluster among the matches: root a new one.
                self._duplicate_clusters[close_duplicates[0]].add(A__)
    def A__ ( self):
        # get_duplicate_clusters: flatten the base->members map into a list of
        # clusters, each cluster a list of {base_index, repo_name, path} dicts.
        lowercase = []
        for base, duplicates in self._duplicate_clusters.items():
            lowercase = [base] + list(A__)
            # reformat the cluster to be a list of dict
            lowercase = [{'''base_index''': el[0], '''repo_name''': el[1], '''path''': el[2]} for el in cluster]
            duplicate_clusters.append(A__)
        return duplicate_clusters
    def A__ ( self ,A__):
        # save: dump the clusters to a JSON file at the given path.
        lowercase = self.get_duplicate_clusters()
        with open(A__ ,'''w''') as f:
            json.dump(A__ ,A__)
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Map one ``(index, row)`` pair to ``((index, repo_name, path), MinHash)``,
    or ``None`` implicitly when the row has too few tokens.

    BUG FIX: the tuple was unpacked into the same throwaway name twice, so
    ``data`` (and ``index``) were undefined below; the pair is now unpacked
    properly.
    NOTE(review): ``get_min_hash``/``NON_ALPHA`` are still bound to mangled
    names at module level in this file — confirm the wiring.
    '''
    index, data = lowerCAmelCase__
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data['''content'''] ) if len(t.strip() ) > 0] )
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Yield ``((index, repo_name, path), MinHash)`` pairs for an enumerated
    dataset iterator, computing hashes in a multiprocessing pool fed through a
    ThreadedIterator.

    NOTE(review): ``_compute_min_hash`` is not defined under that name in this
    file (the worker above was renamed) — confirm the wiring before use.
    '''
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash , ThreadedIterator(lowerCAmelCase__ , max_queue_size=1_0000 ) , chunksize=100 , ):
            if data is not None:
                yield data
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
    '''Build duplicate clusters for a dataset via MinHash LSH.

    NOTE(review): the signature declares the same parameter name twice (a
    SyntaxError in Python), the index is bound to ``lowercase`` while the body
    reads ``di``, and ``DuplicationIndex``/``minhash_iter`` are not defined
    under those names in this file — the original identifiers were mangled;
    restore them before running.
    '''
    lowercase = DuplicationIndex(duplication_jaccard_threshold=lowerCAmelCase__ )
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(lowerCAmelCase__ ) ) , max_queue_size=100 ) ):
        di.add(lowerCAmelCase__ , lowerCAmelCase__ )
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase_a ):
    '''Jaccard similarity of the token sets of two code strings
    (|intersection| / |union|).

    BUG FIX: the original def declared the same parameter name twice (a
    SyntaxError) and the return line used the undefined name ``tokensa`` for
    both operands; the two token sets are now distinct.
    NOTE(review): ``get_tokens`` is still mangled to ``UpperCamelCase`` above in
    this file — confirm the wiring.
    '''
    tokensa = get_tokens(lowerCAmelCase__ )
    tokensb = get_tokens(lowerCAmelCase_a )
    return len(tokensa & tokensb ) / len(tokensa | tokensb )
# Module-level slot for the dataset shared with multiprocessing workers
# (presumably named ``_shared_dataset`` upstream and set in find_extremes —
# verify, since the functions below read ``_shared_dataset`` by that name).
lowercase__ :List[Any] = None
def _find_cluster_extremes_shared ( cluster , jaccard_threshold ):
    '''Reduce one duplicate cluster to its "extreme" representatives: elements
    are kept unless they are near-duplicates (Jaccard >= threshold) of an
    already-kept element, in which case that element's ``copies`` count grows.

    BUG FIX: the original def reused one mangled parameter name twice (a
    SyntaxError) and every binding inside the loop was clobbered, leaving
    ``extremes`` and the element contents undefined. Names are restored to the
    upstream ones (the keyword call ``jaccard_threshold=...`` in find_extremes
    requires that exact parameter name).
    NOTE(review): reads the module-global ``_shared_dataset`` and the sibling
    ``jaccard_similarity``, both still bound to mangled names in this file —
    confirm the wiring.
    '''
    extremes = []
    for elementa in cluster:
        codea = _shared_dataset[elementa['''base_index''']]['''content''']
        for elementb in extremes:
            codeb = _shared_dataset[elementb['''base_index''']]['''content''']
            if jaccard_similarity(codea , codeb ) >= jaccard_threshold:
                elementb["copies"] += 1
                break
        else:
            # Not close to any kept element: it becomes a new extreme.
            elementa["copies"] = 1
            extremes.append(elementa )
    return extremes
def find_extremes ( cluster_list , dataset , jaccard_threshold ):
    '''Run ``_find_cluster_extremes_shared`` over every cluster in a
    multiprocessing pool and collect the per-cluster extreme lists.

    The dataset is published through the module-global ``_shared_dataset`` so
    worker processes inherit it instead of pickling it per task.

    BUG FIX: the original def reused one mangled parameter name three times (a
    SyntaxError) and discarded the global assignment, the partial and the
    accumulator into throwaway locals; the upstream identifiers are restored
    (the caller below invokes this as ``find_extremes``).
    '''
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    extremes_fn = partial(_find_cluster_extremes_shared , jaccard_threshold=jaccard_threshold )
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                extremes_fn , cluster_list , ) , total=len(cluster_list ) , ):
            extremes_list.append(extremes )
    return extremes_list
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ = 0.85 ):
    '''Deduplicate a code dataset: cluster near-duplicates, keep one "extreme"
    representative per cluster, filter the rest out, and annotate each cluster
    element with ``is_extreme``/``copies``. Returns (filtered_dataset, clusters).

    NOTE(review): the def reuses one mangled parameter name twice (a
    SyntaxError), and nearly every ``lowercase = ...`` below replaced an
    assignment to a real name (duplicate_clusters, duplicate_indices,
    extreme_dict, remove_indices, ds_filter, element annotations) — the
    function cannot run as written; restore the upstream identifiers first.
    '''
    lowercase = make_duplicate_clusters(lowerCAmelCase__ , lowerCAmelCase__ )
    lowercase = {x['''base_index'''] for cluster in duplicate_clusters for x in cluster}
    lowercase = {}
    lowercase = find_extremes(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
    for extremes in extremes_clusters:
        for element in extremes:
            lowercase = element
    # Everything that is a duplicate but not a kept extreme gets dropped.
    lowercase = duplicate_indices - set(extreme_dict.keys() )
    lowercase = dataset.filter(lambda lowerCAmelCase__ , lowerCAmelCase__ : idx not in remove_indices , with_indices=lowerCAmelCase__ )
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            lowercase = element['''base_index'''] in extreme_dict
            if element["is_extreme"]:
                lowercase = extreme_dict[element['''base_index''']]['''copies''']
    print(f'Original dataset size: {len(lowerCAmelCase__ )}' )
    print(f'Number of duplicate clusters: {len(lowerCAmelCase__ )}' )
    print(f'Files in duplicate cluster: {len(lowerCAmelCase__ )}' )
    print(f'Unique files in duplicate cluster: {len(lowerCAmelCase__ )}' )
    print(f'Filtered dataset size: {len(lowerCAmelCase__ )}' )
    return ds_filter, duplicate_clusters
| 633 | 1 |
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class lowercase ( unittest.TestCase ):
    '''Integration check: Flax mT5-small must reproduce the reference
    cross-entropy score (computed with the original MTF implementation) to
    within 1e-4.'''
    @slow
    def A__ ( self):
        # NOTE(review): every local below rebinds the throwaway name
        # ``lowercase`` while later lines read the original names (model,
        # tokenizer, input_ids, labels, logits, loss, mtf_score) — the method
        # cannot run as written; restore the mangled identifiers.
        lowercase = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''')
        lowercase = AutoTokenizer.from_pretrained('''google/mt5-small''')
        lowercase = tokenizer('''Hello there''' ,return_tensors='''np''').input_ids
        lowercase = tokenizer('''Hi I am''' ,return_tensors='''np''').input_ids
        lowercase = shift_tokens_right(A__ ,model.config.pad_token_id ,model.config.decoder_start_token_id)
        lowercase = model(A__ ,decoder_input_ids=A__).logits
        lowercase = optax.softmax_cross_entropy(A__ ,onehot(A__ ,logits.shape[-1])).mean()
        lowercase = -(labels.shape[-1] * loss.item())
        lowercase = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1E-4)
| 633 |
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
lowercase__ :Union[str, Any] = logging.get_logger(__name__)
class lowercase ( SCREAMING_SNAKE_CASE__ ):
    '''Safety checker: CLIP vision tower plus two scalar linear heads that score
    NSFW and watermark probability per image; flagged images are replaced by
    black (all-zero) arrays.

    NOTE(review): the base-class reference ``SCREAMING_SNAKE_CASE__`` (presumably
    PreTrainedModel) is undefined, the constructor binds the submodules to a
    throwaway local instead of ``self.vision_model``/``self.p_head``/
    ``self.w_head``, and the forward body reads ``nsfw_detected``/
    ``watermark_detected``/``images`` which are never bound — the original
    identifiers were mangled; restore before use.
    '''
    lowercase_ : Union[str, Any] =CLIPConfig
    lowercase_ : str =['''CLIPEncoderLayer''']
    def __init__( self ,A__):
        super().__init__(A__)
        # Vision tower + two 1-dim heads (NSFW probability, watermark probability).
        lowercase = CLIPVisionModelWithProjection(config.vision_config)
        lowercase = nn.Linear(config.vision_config.projection_dim ,1)
        lowercase = nn.Linear(config.vision_config.projection_dim ,1)
    @torch.no_grad()
    def A__ ( self ,A__ ,A__ ,A__=0.5 ,A__=0.5):
        # forward(clip_input, images, p_threshold=0.5, w_threshold=0.5)
        lowercase = self.vision_model(A__)[0]
        lowercase = self.p_head(A__)
        lowercase = nsfw_detected.flatten()
        lowercase = nsfw_detected > p_threshold
        lowercase = nsfw_detected.tolist()
        if any(A__):
            logger.warning(
                '''Potential NSFW content was detected in one or more images. A black image will be returned instead.'''
                ''' Try again with a different prompt and/or seed.''')
        for idx, nsfw_detected_ in enumerate(A__):
            if nsfw_detected_:
                # Replace the flagged image with a black image of the same shape.
                lowercase = np.zeros(images[idx].shape)
        lowercase = self.w_head(A__)
        lowercase = watermark_detected.flatten()
        lowercase = watermark_detected > w_threshold
        lowercase = watermark_detected.tolist()
        if any(A__):
            logger.warning(
                '''Potential watermarked content was detected in one or more images. A black image will be returned instead.'''
                ''' Try again with a different prompt and/or seed.''')
        for idx, watermark_detected_ in enumerate(A__):
            if watermark_detected_:
                lowercase = np.zeros(images[idx].shape)
        return images, nsfw_detected, watermark_detected
| 633 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ :Optional[int] = logging.get_logger(__name__)
lowercase__ :int = {
"facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}
class lowercase ( SCREAMING_SNAKE_CASE__ ):
    '''Configuration class for the NLLB-MoE encoder-decoder model (mixture of
    experts on top of an M2M-style transformer).

    NOTE(review): the base-class reference ``SCREAMING_SNAKE_CASE__`` (presumably
    PretrainedConfig) is undefined, and every ``lowercase = <name>`` line in
    ``__init__`` both discards the value and reads a parameter name that no
    longer exists (all parameters were mangled to ``A__``) — the class cannot
    be instantiated as written; restore the original keyword names.
    '''
    lowercase_ : str ='''nllb-moe'''
    lowercase_ : Optional[int] =['''past_key_values''']
    # Maps generic config attribute names onto this model's field names.
    lowercase_ : Any ={'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
    def __init__( self ,A__=1_2_8_1_1_2 ,A__=1_0_2_4 ,A__=1_2 ,A__=4_0_9_6 ,A__=1_6 ,A__=1_2 ,A__=4_0_9_6 ,A__=1_6 ,A__=0.05 ,A__=0.05 ,A__=True ,A__=True ,A__="relu" ,A__=1_0_2_4 ,A__=0.1 ,A__=0.1 ,A__=0.0 ,A__=0.02 ,A__=2 ,A__=True ,A__=False ,A__="float32" ,A__=False ,A__=1_2_8 ,A__=6_4 ,A__=4 ,A__=4 ,A__=0.001 ,A__=0.001 ,A__="all" ,A__=False ,A__=False ,A__=1.0 ,A__=0.2 ,A__=1 ,A__=0 ,A__=2 ,A__=False ,**A__ ,):
        # Transformer geometry.
        lowercase = vocab_size
        lowercase = max_position_embeddings
        lowercase = d_model
        lowercase = encoder_ffn_dim
        lowercase = encoder_layers
        lowercase = encoder_attention_heads
        lowercase = decoder_ffn_dim
        lowercase = decoder_layers
        lowercase = decoder_attention_heads
        # Regularisation.
        lowercase = dropout
        lowercase = attention_dropout
        lowercase = activation_dropout
        lowercase = activation_function
        lowercase = init_std
        lowercase = encoder_layerdrop
        lowercase = decoder_layerdrop
        lowercase = use_cache
        lowercase = encoder_layers
        lowercase = scale_embedding  # scale factor will be sqrt(d_model) if True
        # Mixture-of-experts routing.
        lowercase = router_z_loss_coef
        lowercase = router_aux_loss_coef
        lowercase = decoder_sparse_step
        lowercase = encoder_sparse_step
        lowercase = num_experts
        lowercase = expert_capacity
        lowercase = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f'`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}')
        lowercase = router_dtype
        lowercase = router_ignore_padding_tokens
        lowercase = batch_prioritized_routing
        lowercase = second_expert_policy
        lowercase = normalize_router_prob_before_dropping
        lowercase = moe_eval_capacity_token_fraction
        lowercase = moe_token_dropout
        lowercase = output_router_logits
        super().__init__(
            pad_token_id=A__ ,bos_token_id=A__ ,eos_token_id=A__ ,is_encoder_decoder=A__ ,decoder_start_token_id=A__ ,**A__ ,)
| 633 |
class Node:
    """Binary-search-tree node used by ``tree_sort``.

    BUG FIX: the class (used as ``Node`` by every caller) had been renamed, the
    constructor and ``insert`` discarded every value into a throwaway local
    instead of writing ``self.val``/``self.left``/``self.right``, and ``insert``
    read an undefined parameter name. The canonical implementation is restored.
    """

    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        """Insert ``val`` into the subtree rooted at this node.

        Equal values overwrite ``self.val`` (i.e. duplicates collapse).
        """
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
            else:
                self.val = val
        else:
            # Node was created with a falsy value (e.g. 0); just store the value.
            self.val = val


def inorder(root, res):
    """Append the subtree's values to ``res`` in sorted (in-order) order."""
    # Recursive traversal
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    """Sort ``arr`` by inserting every element into a BST and reading it back
    in order. Note that duplicate values collapse to one occurrence."""
    # Build BST
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 633 | 1 |
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class lowercase ( SCREAMING_SNAKE_CASE__ ):
    '''TrainerCallback stub that records the name of every callback event it
    receives so tests can compare the observed event sequence.

    NOTE(review): the base-class reference ``SCREAMING_SNAKE_CASE__`` (presumably
    TrainerCallback) is undefined, and ``__init__`` binds the list to a
    throwaway local instead of ``self.events`` (which every handler appends to)
    — restore the mangled identifiers before use.
    '''
    def __init__( self):
        lowercase = []
    def A__ ( self ,A__ ,A__ ,A__ ,**A__):
        self.events.append('''on_init_end''')
    def A__ ( self ,A__ ,A__ ,A__ ,**A__):
        self.events.append('''on_train_begin''')
    def A__ ( self ,A__ ,A__ ,A__ ,**A__):
        self.events.append('''on_train_end''')
    def A__ ( self ,A__ ,A__ ,A__ ,**A__):
        self.events.append('''on_epoch_begin''')
    def A__ ( self ,A__ ,A__ ,A__ ,**A__):
        self.events.append('''on_epoch_end''')
    def A__ ( self ,A__ ,A__ ,A__ ,**A__):
        self.events.append('''on_step_begin''')
    def A__ ( self ,A__ ,A__ ,A__ ,**A__):
        self.events.append('''on_step_end''')
    def A__ ( self ,A__ ,A__ ,A__ ,**A__):
        self.events.append('''on_evaluate''')
    def A__ ( self ,A__ ,A__ ,A__ ,**A__):
        self.events.append('''on_predict''')
    def A__ ( self ,A__ ,A__ ,A__ ,**A__):
        self.events.append('''on_save''')
    def A__ ( self ,A__ ,A__ ,A__ ,**A__):
        self.events.append('''on_log''')
    def A__ ( self ,A__ ,A__ ,A__ ,**A__):
        self.events.append('''on_prediction_step''')
@require_torch
class lowercase ( unittest.TestCase ):
    '''Tests for Trainer callback plumbing: default callback set, add/pop/remove
    by class or instance, and the exact event sequence fired during training.

    NOTE(review): throughout this class the mangled ``lowercase = ...`` locals
    discard the values that later lines read under their original names
    (``trainer``, ``expected_callbacks``, ``events``, ``cb``, ``step``,
    ``expected_events``, ``evaluation_events``), and ``MyTestTrainerCallback``
    is not defined under that name in this file — restore the identifiers
    before these tests can run.
    '''
    def A__ ( self):
        # setUp: fresh temp dir per test (read back as ``self.output_dir``).
        lowercase = tempfile.mkdtemp()
    def A__ ( self):
        # tearDown: remove the temp dir.
        shutil.rmtree(self.output_dir)
    def A__ ( self ,A__=0 ,A__=0 ,A__=6_4 ,A__=6_4 ,A__=None ,A__=False ,**A__):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # its set to False since the tests later on depend on its value.
        lowercase = RegressionDataset(length=A__)
        lowercase = RegressionDataset(length=A__)
        lowercase = RegressionModelConfig(a=A__ ,b=A__)
        lowercase = RegressionPreTrainedModel(A__)
        lowercase = TrainingArguments(self.output_dir ,disable_tqdm=A__ ,report_to=[] ,**A__)
        return Trainer(
            A__ ,A__ ,train_dataset=A__ ,eval_dataset=A__ ,callbacks=A__ ,)
    def A__ ( self ,A__ ,A__):
        # Compares two callback lists, accepting either classes or instances.
        self.assertEqual(len(A__) ,len(A__))
        # Order doesn't matter
        lowercase = sorted(A__ ,key=lambda A__: cb.__name__ if isinstance(A__ ,A__) else cb.__class__.__name__)
        lowercase = sorted(A__ ,key=lambda A__: cb.__name__ if isinstance(A__ ,A__) else cb.__class__.__name__)
        for cba, cba in zip(A__ ,A__):
            if isinstance(A__ ,A__) and isinstance(A__ ,A__):
                self.assertEqual(A__ ,A__)
            elif isinstance(A__ ,A__) and not isinstance(A__ ,A__):
                self.assertEqual(A__ ,cba.__class__)
            elif not isinstance(A__ ,A__) and isinstance(A__ ,A__):
                self.assertEqual(cba.__class__ ,A__)
            else:
                self.assertEqual(A__ ,A__)
    def A__ ( self):
        # Builds the event sequence a full Trainer.train() run should fire,
        # given the trainer's logging/eval/save configuration.
        lowercase = ['''on_init_end''', '''on_train_begin''']
        lowercase = 0
        lowercase = len(trainer.get_eval_dataloader())
        lowercase = ['''on_prediction_step'''] * len(trainer.get_eval_dataloader()) + ['''on_log''', '''on_evaluate''']
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append('''on_epoch_begin''')
            for _ in range(A__):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append('''on_log''')
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append('''on_save''')
            expected_events.append('''on_epoch_end''')
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
    def A__ ( self):
        # Default callbacks, plus callbacks passed at init, plus the
        # ProgressCallback/PrinterCallback switch driven by disable_tqdm.
        lowercase = self.get_trainer()
        lowercase = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks ,A__)
        # Callbacks passed at init are added to the default callbacks
        lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(A__)
        self.check_callbacks_equality(trainer.callback_handler.callbacks ,A__)
        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        lowercase = self.get_trainer(disable_tqdm=A__)
        lowercase = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks ,A__)
    def A__ ( self):
        # add/pop/remove callbacks, both by class name and by instance.
        lowercase = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        lowercase = self.get_trainer()
        # We can add, pop, or remove by class name
        trainer.remove_callback(A__)
        expected_callbacks.remove(A__)
        self.check_callbacks_equality(trainer.callback_handler.callbacks ,A__)
        lowercase = self.get_trainer()
        lowercase = trainer.pop_callback(A__)
        self.assertEqual(cb.__class__ ,A__)
        self.check_callbacks_equality(trainer.callback_handler.callbacks ,A__)
        trainer.add_callback(A__)
        expected_callbacks.insert(0 ,A__)
        self.check_callbacks_equality(trainer.callback_handler.callbacks ,A__)
        # We can also add, pop, or remove by instance
        lowercase = self.get_trainer()
        lowercase = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(A__)
        expected_callbacks.remove(A__)
        self.check_callbacks_equality(trainer.callback_handler.callbacks ,A__)
        lowercase = self.get_trainer()
        lowercase = trainer.callback_handler.callbacks[0]
        lowercase = trainer.pop_callback(A__)
        self.assertEqual(A__ ,A__)
        self.check_callbacks_equality(trainer.callback_handler.callbacks ,A__)
        trainer.add_callback(A__)
        expected_callbacks.insert(0 ,A__)
        self.check_callbacks_equality(trainer.callback_handler.callbacks ,A__)
    def A__ ( self):
        # End-to-end event-flow test across logging/save/eval configurations,
        # plus the duplicated-callback warning.
        import warnings
        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action='''ignore''' ,category=A__)
        lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        lowercase = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(A__ ,self.get_expected_events(A__))
        # Independent log/save/eval
        lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,logging_steps=5)
        trainer.train()
        lowercase = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(A__ ,self.get_expected_events(A__))
        lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,save_steps=5)
        trainer.train()
        lowercase = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(A__ ,self.get_expected_events(A__))
        lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,eval_steps=5 ,evaluation_strategy='''steps''')
        trainer.train()
        lowercase = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(A__ ,self.get_expected_events(A__))
        lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,evaluation_strategy='''epoch''')
        trainer.train()
        lowercase = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(A__ ,self.get_expected_events(A__))
        # A bit of everything
        lowercase = self.get_trainer(
            callbacks=[MyTestTrainerCallback] ,logging_steps=3 ,save_steps=1_0 ,eval_steps=5 ,evaluation_strategy='''steps''' ,)
        trainer.train()
        lowercase = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(A__ ,self.get_expected_events(A__))
        # warning should be emitted for duplicated callbacks
        with patch('''transformers.trainer_callback.logger.warning''') as warn_mock:
            lowercase = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] ,)
            assert str(A__) in warn_mock.call_args[0][0]
| 633 |
import os


def solution(filename: str = "input.txt") -> int:
    """Project Euler 82: minimal path sum from any cell of the left column to
    any cell of the right column, moving only right, up or down, for the
    comma-separated matrix stored in ``filename`` (resolved relative to this
    file's directory, as in the upstream solution).

    BUG FIX: every write into the DP table had been clobbered into a throwaway
    local (``lowercase = ...``), so ``minimal_path_sums`` was never filled and
    the function (renamed back to ``solution``, which the ``__main__`` guard
    calls) could not run. The canonical three-pass column DP is restored.
    """
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")]
            for line in input_file.readlines()
        ]
    rows = len(matrix)
    cols = len(matrix[0])
    # minimal_path_sums[i][j] = cheapest path ending at cell (i, j).
    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]
    for j in range(1, cols):
        # First pass: enter the column from the left.
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        # Second pass: allow moving down within the column.
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )
        # Third pass: allow moving up within the column.
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )
    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)


if __name__ == "__main__":
    print(f"{solution() = }")
| 633 | 1 |
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
lowercase__ :Optional[Any] = logging.get_logger(__name__)
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
    '''Extract the targeted warning blocks from one CI artifact (a zip file, or
    a plain directory when running from a GitHub Actions workflow).

    NOTE(review): the def reuses one mangled parameter name twice (a SyntaxError
    in Python), and the body reads ``targets``, ``from_gh``, ``artifact_path``,
    ``warning``, ``buffer`` and ``selected_warnings`` which are never bound
    under those names here — the upstream identifiers (artifact_path, targets)
    were mangled; restore them before running.
    '''
    lowercase = set()
    lowercase = []
    def parse_line(lowerCAmelCase__ ):
        # Accumulate indented warning-body lines into ``buffer``; a dedent ends
        # the warning and, if it matches a target, moves it to the result set.
        for line in fp:
            if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
                lowercase = line.decode('''UTF-8''' )
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(''' ''' ):
                # process a single warning and move it to `selected_warnings`.
                if len(lowerCAmelCase__ ) > 0:
                    lowercase = '''\n'''.join(lowerCAmelCase__ )
                    # Only keep the warnings specified in `targets`
                    if any(f': {x}: ' in warning for x in targets ):
                        selected_warnings.add(lowerCAmelCase__ )
                    buffer.clear()
                continue
            else:
                lowercase = line.strip()
                buffer.append(lowerCAmelCase__ )
    if from_gh:
        for filename in os.listdir(lowerCAmelCase__ ):
            lowercase = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ )
            if not os.path.isdir(lowerCAmelCase__ ):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(lowerCAmelCase__ ) as fp:
                    parse_line(lowerCAmelCase__ )
    else:
        try:
            with zipfile.ZipFile(lowerCAmelCase__ ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(lowerCAmelCase__ ):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(lowerCAmelCase__ ) as fp:
                            parse_line(lowerCAmelCase__ )
        except Exception:
            logger.warning(
                f'{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.' )
    return selected_warnings
def extract_warnings ( artifact_dir , targets ):
    '''Collect the selected warnings from every artifact under ``artifact_dir``
    (every ``*.zip`` entry, or every entry when running from a GitHub workflow).

    BUG FIX: the original def reused one mangled parameter name twice (a
    SyntaxError) and the comprehension/accumulator were bound to throwaway
    names; the upstream identifiers are restored (the ``__main__`` block below
    calls this as ``extract_warnings``).
    NOTE(review): ``from_gh`` and ``extract_warnings_from_single_artifact`` are
    still bound to mangled names in this file — confirm the wiring.
    '''
    selected_warnings = set()
    paths = [os.path.join(artifact_dir , p ) for p in os.listdir(artifact_dir ) if (p.endswith('''.zip''' ) or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p , targets ) )
    return selected_warnings
# CLI driver: download the artifacts of a workflow run (unless --from_gh) and
# dump the warnings matching --targets into selected_warnings.json.
# NOTE(review): several bindings below were mangled (``parser``, ``args``,
# ``from_gh``, ``artifacts``, ``selected_warnings`` are read but bound to
# ``lowercase__``; ``values`` in the type-converter and ``list_str`` in the
# --targets option are undefined) — restore the original names before running.
if __name__ == "__main__":
    def UpperCamelCase ( lowerCAmelCase__ ):
        '''argparse ``type=``: split a comma-separated option string into a list.'''
        return values.split(''',''' )
    lowercase__ :Union[str, Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    # optional parameters
    parser.add_argument(
        "--targets",
        default="DeprecationWarning,UserWarning,FutureWarning",
        type=list_str,
        help="Comma-separated list of target warning(s) which we want to extract.",
    )
    parser.add_argument(
        "--from_gh",
        action="store_true",
        help="If running from a GitHub action workflow and collecting warnings from its artifacts.",
    )
    lowercase__ :List[str] = parser.parse_args()
    lowercase__ :str = args.from_gh
    if from_gh:
        # The artifacts have to be downloaded using `actions/download-artifact@v3`
        pass
    else:
        os.makedirs(args.output_dir, exist_ok=True)
        # get download links
        lowercase__ :Dict = get_artifacts_links(args.workflow_run_id, token=args.token)
        with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
            json.dump(artifacts, fp, ensure_ascii=False, indent=4)
        # download artifacts
        for idx, (name, url) in enumerate(artifacts.items()):
            print(name)
            print(url)
            print("=" * 80)
            download_artifact(name, url, args.output_dir, args.token)
            # Be gentle to GitHub
            time.sleep(1)
    # extract warnings from artifacts
    lowercase__ :int = extract_warnings(args.output_dir, args.targets)
    lowercase__ :Tuple = sorted(selected_warnings)
    with open(os.path.join(args.output_dir, "selected_warnings.json"), "w", encoding="UTF-8") as fp:
        json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 633 |
from __future__ import annotations
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Return True iff the side lengths in ``lowerCAmelCase__`` can form a
    polygon in the Euclidean plane (the longest side must be strictly shorter
    than the sum of all the other sides).

    Raises ValueError for fewer than two sides or any non-positive side.

    BUG FIX: the body read the undefined names ``nums`` and ``copy_nums`` —
    the validation now uses the parameter and the sorted copy is actually
    bound before use. The input list itself is left unmodified.
    '''
    if len(lowerCAmelCase__ ) < 2:
        raise ValueError('''Monogons and Digons are not polygons in the Euclidean space''' )
    if any(i <= 0 for i in lowerCAmelCase__ ):
        raise ValueError('''All values must be greater than 0''' )
    copy_nums = lowerCAmelCase__.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1] )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 633 | 1 |
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
# NOTE(review): the names below were mangled — the decorators further down
# read `REQUIRE_FAIRSEQ`, `_has_fairseq`, `UNSUPPORTED_ON_WINDOWS`,
# `_on_windows`, `REQUIRE_TRANSFORMERS`, `_has_transformers`; confirm the
# intended bindings against the original file.
lowercase__ :List[Any] = pytest.mark.integration
# Metrics that need the `fairseq` package installed.
lowercase__ :Optional[Any] = {"comet"}
# Whether fairseq is importable in this environment.
lowercase__ :Any = importlib.util.find_spec("fairseq") is not None
# Metrics that cannot run on Windows.
lowercase__ :Optional[Any] = {"code_eval"}
lowercase__ :List[str] = os.name == "nt"
# Metrics that need the `transformers` package installed.
lowercase__ :Union[str, Any] = {"bertscore", "frugalscore", "perplexity"}
lowercase__ :int = importlib.util.find_spec("transformers") is not None
def UpperCamelCase ( test_case ):
    """Decorator: skip a metric test when the metric needs fairseq and it is absent.

    The mangled original wrapped and called an undefined `SCREAMING_SNAKE_CASE_`;
    it must wrap and invoke its own `test_case` argument.
    """
    @wraps(test_case )
    def wrapper(self , metric_name ):
        # NOTE(review): `_has_fairseq` / `REQUIRE_FAIRSEQ` are module-level
        # flags whose names were mangled above — confirm the bindings.
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('''\"test requires Fairseq\"''' )
        else:
            test_case(self , metric_name )
    return wrapper
def UpperCamelCase ( test_case ):
    """Decorator: skip a metric test when the metric needs transformers and it is absent.

    The mangled original wrapped and called an undefined `SCREAMING_SNAKE_CASE_`;
    it must wrap and invoke its own `test_case` argument.
    """
    @wraps(test_case )
    def wrapper(self , metric_name ):
        # NOTE(review): `_has_transformers` / `REQUIRE_TRANSFORMERS` are
        # module-level flags whose names were mangled above — confirm.
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('''\"test requires transformers\"''' )
        else:
            test_case(self , metric_name )
    return wrapper
def UpperCamelCase ( test_case ):
    """Decorator: skip a metric test on Windows for metrics known not to work there.

    The mangled original wrapped and called an undefined `SCREAMING_SNAKE_CASE_`;
    it must wrap and invoke its own `test_case` argument.
    """
    @wraps(test_case )
    def wrapper(self , metric_name ):
        # NOTE(review): `_on_windows` / `UNSUPPORTED_ON_WINDOWS` are
        # module-level flags whose names were mangled above — confirm.
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('''\"test not supported on Windows\"''' )
        else:
            test_case(self , metric_name )
    return wrapper
def UpperCamelCase ( ):
    """Collect the names of the local metrics under ./metrics/, skipping the
    unfinished `gleu` metric, formatted for `parameterized.named_parameters`."""
    metric_names = (path.split(os.sep )[-2] for path in glob.glob('''./metrics/*/''' ))
    return [{"testcase_name": name, "metric_name": name} for name in metric_names if name != "gleu"]  # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
    _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
@local
class lowercase ( parameterized.TestCase ):
    # NOTE(review): identifiers in this class were mangled — locals are all
    # rebound to `lowercase` while later lines read the intended names
    # (`metric_module`, `metric`, `parameters`, `results`, ...), and the
    # `@for_all_test_methods` arguments were collapsed to `_lowerCamelCase`
    # (presumably the three skip decorators defined above). Confirm against
    # the original `LocalMetricTest` before relying on this class.
    # Registry of per-metric intensive-call patchers, filled via the
    # classmethod at the bottom, plus the metric name injected per test case.
    lowercase_ : Tuple ={}
    lowercase_ : Optional[int] =None

    @pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''')
    @pytest.mark.filterwarnings('''ignore:load_metric is deprecated:FutureWarning''')
    def A__ ( self ,A__):
        # Run a local metric's docstring examples with intensive calls patched out.
        lowercase = '''[...]'''
        lowercase = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join('''metrics''' ,A__)).module_path)
        lowercase = datasets.load.import_main_class(metric_module.__name__ ,dataset=A__)
        # check parameters
        lowercase = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values())) # no **kwargs
        # run doctest
        with self.patch_intensive_calls(A__ ,metric_module.__name__):
            with self.use_local_metrics():
                try:
                    lowercase = doctest.testmod(A__ ,verbose=A__ ,raise_on_error=A__)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1] # raise the exception that doctest caught
        self.assertEqual(results.failed ,0)
        self.assertGreater(results.attempted ,1)

    @slow
    def A__ ( self ,A__):
        # Same doctest run, but without patching the intensive calls (slow path).
        lowercase = '''[...]'''
        lowercase = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join('''metrics''' ,A__)).module_path)
        # run doctest
        with self.use_local_metrics():
            lowercase = doctest.testmod(A__ ,verbose=A__ ,raise_on_error=A__)
        self.assertEqual(results.failed ,0)
        self.assertGreater(results.attempted ,1)

    @contextmanager
    def A__ ( self ,A__ ,A__):
        # Apply the registered intensive-calls patcher for this metric, if any.
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](A__):
                yield
        else:
            yield

    @contextmanager
    def A__ ( self):
        # Redirect `datasets.load_metric` to load from the local ./metrics dir.
        def load_local_metric(A__ ,*A__ ,**A__):
            return load_metric(os.path.join('''metrics''' ,A__) ,*A__ ,**A__)
        with patch('''datasets.load_metric''') as mock_load_metric:
            lowercase = load_local_metric
            yield

    @classmethod
    def A__ ( cls ,A__):
        # Decorator factory: register a context-manager patcher for `metric_name`.
        def wrapper(A__):
            lowercase = contextmanager(A__)
            lowercase = patcher
            return patcher
        return wrapper
@LocalMetricTest.register_intensive_calls_patcher('''bleurt''' )
def UpperCamelCase ( module_name ):
    """Patch bleurt so its doctest runs without downloading or running a real model."""
    import tensorflow.compat.v1 as tf  # was mangled to `compat.va` (digits -> `a`)
    from bleurt.score import Predictor

    tf.flags.DEFINE_string('''sv''' , '''''' , '''''' ) # handle pytest cli flags

    class MockedPredictor(Predictor):
        # The mangled original named this class `lowercase` but instantiated
        # `MockedPredictor()` below (NameError) — restored the matching name.
        def predict(self , input_dict ):
            # Return fixed scores instead of a real BLEURT forward pass.
            assert len(input_dict['''input_ids''']) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch('''bleurt.score._create_predictor''' ) as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield
@LocalMetricTest.register_intensive_calls_patcher('''bertscore''' )
def UpperCamelCase ( module_name ):
    """Patch bert_score's model download and forward pass with cheap stand-ins."""
    import torch

    def bert_cos_score_idf(model , refs , *args , **kwargs ):
        # One constant score row per reference; the mangled original sized the
        # tensor with an undefined `SCREAMING_SNAKE_CASE_` instead of `refs`.
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs ) )

    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch('''bert_score.scorer.get_model''' ), patch(
        '''bert_score.scorer.bert_cos_score_idf''' ) as mock_bert_cos_score_idf:
        # The mangled original dropped the `.side_effect` attribute assignment.
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield
@LocalMetricTest.register_intensive_calls_patcher('''comet''' )
def UpperCamelCase ( module_name ):
    """Patch comet's model download / checkpoint loading with a fixed-score stub."""
    def load_from_checkpoint(model_path ):
        class Model:
            # The mangled original named this class `lowercase` but returned
            # `Model()` below (NameError) — restored the matching name.
            def predict(self , data , *args , **kwargs ):
                assert len(data ) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores ) / len(scores )
        return Model()

    # mock load_from_checkpoint which is supposed to do download a bert model
    # mock load_from_checkpoint which is supposed to do download a bert model
    with patch('''comet.download_model''' ) as mock_download_model:
        mock_download_model.return_value = None
        with patch('''comet.load_from_checkpoint''' ) as mock_load_from_checkpoint:
            # The mangled original dropped `.return_value` / `.side_effect`.
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield
def UpperCamelCase ( ):
    """seqeval must reject an unknown tagging `scheme` with a clear ValueError.

    The mangled original passed an undefined `SCREAMING_SNAKE_CASE_` to
    `pytest.raises`, `re.escape` and `compute` — restored the intended names.
    """
    metric = load_metric(os.path.join('''metrics''' , '''seqeval''' ) )
    wrong_scheme = '''ERROR'''
    error_message = f'Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}'
    with pytest.raises(ValueError , match=re.escape(error_message ) ):
        metric.compute(predictions=[] , references=[] , scheme=wrong_scheme )
| 700 |
def UpperCamelCase ( input_a , input_b ):
    """Logical AND gate: return 1 iff both inputs are non-zero, else 0.

    The mangled original declared the same name for both parameters (a
    SyntaxError) and counted `input_a` twice, ignoring the second input.
    """
    # The gate output is 1 exactly when neither input is 0.
    return int((input_a, input_b).count(0 ) == 0 )


# Alias matching the name used by the self-test / demo code below.
and_gate = UpperCamelCase
def UpperCamelCase ( ):
    """Exercise the AND-gate truth table."""
    truth_table = ((0, 0, 0), (0, 1, 0), (1, 0, 0), (1, 1, 1))
    for left, right, expected in truth_table:
        assert and_gate(left , right ) == expected
if __name__ == "__main__":
    # NOTE(review): `test_and_gate` and `and_gate` are not defined under these
    # names in the mangled file (both functions above are `UpperCamelCase`),
    # so this demo raises NameError as written — confirm the intended targets.
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
| 633 | 0 |
from random import randint, random
def UpperCamelCase ( number_of_cells , frequency , initial_speed , random_frequency = False , random_speed = False , max_speed = 5 , ):
    """Build a one-lane highway as a list with a single row of cells.

    Each cell holds a car's speed, or -1 when empty. Cars are placed every
    `frequency` cells (or at random gaps when `random_frequency`), carrying
    `initial_speed` (or a random speed in [0, max_speed] when `random_speed`).

    The mangled original declared six identically-named parameters (a
    SyntaxError) and dropped the `highway[0][i]` assignment, so no car was
    ever placed — restored the signature and the cell write.
    """
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed , 0 )  # clamp negative speeds to 0
    while i < number_of_cells:
        # Place the cars
        highway[0][i] = (
            randint(0 , max_speed ) if random_speed else initial_speed
        )
        i += (
            randint(1 , max_speed * 2 ) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway
def UpperCamelCase ( highway_now , car_index ):
    """Return the number of empty cells in front of `car_index`.

    When no car is found before the end of the road, the count wraps once to
    the front of the highway (circular road).

    The mangled original declared two identically-named parameters (a
    SyntaxError) and recursed into an undefined `get_distance` — restored
    the signature and made the wrap-around call self-recursive.
    """
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in cells:
        if cell != -1:  # found the next car
            return distance
        distance += 1
    # Here if the car is near the end of the highway: wrap to the front.
    return distance + UpperCamelCase(highway_now , -1 )
def _next_car_distance(highway_now , car_index ):
    # Empty cells in front of `car_index`, wrapping once to the front of the
    # road (local copy of the distance helper so this block is self-contained).
    distance = 0
    for cell in highway_now[car_index + 1 :]:
        if cell != -1:
            return distance
        distance += 1
    return distance + _next_car_distance(highway_now , -1 )


def UpperCamelCase ( highway_now , probability , max_speed ):
    """One Nagel–Schreckenberg speed update: accelerate, brake, random slowdown.

    Returns the next speed for every cell (-1 for empty cells); positions are
    not moved here. The mangled original declared three identically-named
    parameters (a SyntaxError) and collapsed every `next_highway[car_index]`
    write into a throwaway local — restored the indexed updates.
    """
    number_of_cells = len(highway_now )
    # Before calculations, the next state of the highway is empty.
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells ):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed.
            next_highway[car_index] = min(highway_now[car_index] + 1 , max_speed )
            # Number of empty cells before the next car.
            dn = _next_car_distance(highway_now , car_index ) - 1
            # We can't have the car causing an accident.
            next_highway[car_index] = min(next_highway[car_index] , dn )
            if random() < probability:
                # Randomly, a driver will slow down.
                next_highway[car_index] = max(next_highway[car_index] - 1 , 0 )
    return next_highway
def _nsch_distance(highway_now , car_index ):
    # Empty cells in front of `car_index`, wrapping once to the front of the road.
    distance = 0
    for cell in highway_now[car_index + 1 :]:
        if cell != -1:
            return distance
        distance += 1
    return distance + _nsch_distance(highway_now , -1 )


def _nsch_update(highway_now , probability , max_speed ):
    # One Nagel–Schreckenberg speed update (accelerate, brake, random slowdown).
    number_of_cells = len(highway_now )
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells ):
        if highway_now[car_index] != -1:
            next_highway[car_index] = min(highway_now[car_index] + 1 , max_speed )
            dn = _nsch_distance(highway_now , car_index ) - 1
            next_highway[car_index] = min(next_highway[car_index] , dn )
            if random() < probability:
                next_highway[car_index] = max(next_highway[car_index] - 1 , 0 )
    return next_highway


def UpperCamelCase ( highway , number_of_update , probability , max_speed ):
    """Run `number_of_update` simulation steps, appending each new road state.

    `highway` is a list of rows (one per time step); it is mutated in place
    and also returned. The mangled original declared four identically-named
    parameters (a SyntaxError) and lost the per-cell position writes —
    restored, with local helper copies so this block is self-contained.
    """
    number_of_cells = len(highway[0] )
    for i in range(number_of_update ):
        next_speeds_calculated = _nsch_update(highway[i] , probability , max_speed )
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells ):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed ) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds )
    return highway
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 701 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# NOTE(review): restored the conventional `_import_structure` name — the
# mangled original bound this dict (and the torch-only list below) to
# throwaway names, leaving `_import_structure` undefined at the
# `_LazyModule(...)` call, and dropped the `sys.modules[__name__]` rebind.
_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling objects are only importable when torch is installed.
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy loader so heavy deps import on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 633 | 0 |
class lowercase :
    """Prefix-sum array supporting O(1) inclusive range-sum queries.

    The mangled original read an undefined `UpperCAmelCase_` instead of its
    parameters, dropped every `self.prefix_sum` write, and declared all three
    methods under the same name `A__` (so only the last survived) — restored
    distinct, descriptive names.
    """

    def __init__(self , array ):
        # prefix_sum[i] = array[0] + ... + array[i]
        len_array = len(array )
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1 , len_array ):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self , start , end ):
        """Return the sum of array[start..end] inclusive."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self , target_sum ):
        """Return True iff some contiguous sub-array sums to `target_sum`."""
        # Classic prefix-sum trick: a sub-array sums to `target_sum` iff two
        # prefix sums (or a prefix sum and 0) differ by exactly that amount.
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item )
        return False
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 702 |
import logging
from transformers import PretrainedConfig
# Module-level logger for this configuration file.
lowercase__ :int = logging.getLogger(__name__)
# Map from shortcut model name to its hosted config.json URL.
lowercase__ :Dict = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}
class lowercase ( PretrainedConfig ):
    """Configuration for the BertAbs abstractive-summarisation model.

    The mangled original inherited from an undefined `SCREAMING_SNAKE_CASE__`
    (the `PretrainedConfig` imported above), bound every constructor argument
    to `A__` while the body read `vocab_size`, `enc_layers`, ... (NameError),
    and dropped every `self.` attribute write — all restored.
    """

    # NOTE(review): presumably this was `model_type` (PretrainedConfig's
    # class attribute) before mangling — confirm before use.
    lowercase_ : Optional[int] ='''bertabs'''

    def __init__(
        self,
        vocab_size=3_0_5_2_2,
        max_pos=5_1_2,
        enc_layers=6,
        enc_hidden_size=5_1_2,
        enc_heads=8,
        enc_ff_size=5_1_2,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=7_6_8,
        dec_heads=8,
        dec_ff_size=2_0_4_8,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 633 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# NOTE(review): restored the conventional `_import_structure` name (the
# mangled original left it undefined at the `_LazyModule(...)` call and
# dropped the `sys.modules[__name__]` rebind). The `blip_a`/`Blipa` names
# below were mangled `blip_2`/`Blip2` — the string table above names the
# real modules and classes.
_import_structure = {
    "configuration_blip_2": [
        "BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Blip2Config",
        "Blip2QFormerConfig",
        "Blip2VisionConfig",
    ],
    "processing_blip_2": ["Blip2Processor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling objects are only importable when torch is installed.
    _import_structure["modeling_blip_2"] = [
        "BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Blip2Model",
        "Blip2QFormerModel",
        "Blip2PreTrainedModel",
        "Blip2ForConditionalGeneration",
        "Blip2VisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
    import sys

    # Replace this module with a lazy loader so heavy deps import on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 703 |
import warnings

# Re-export kept for backward compatibility with `from inpainting import ...` users.
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline  # noqa F401

# Import-time deprecation notice pointing users at the diffusers API.
_DEPRECATION_MESSAGE = (
    "The `inpainting.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionInpaintPipeline` instead."
)
warnings.warn(_DEPRECATION_MESSAGE)
| 633 | 0 |
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def UpperCamelCase ( xlm_checkpoint_path , pytorch_dump_folder_path ):
    """Convert an original XLM checkpoint into pytorch_model.bin / config / vocab files.

    The mangled original declared two identically-named parameters (a
    SyntaxError), collapsed both dict-building loops and all path locals into
    one throwaway name, and printed the config path when saving the vocab file.

    Args:
        xlm_checkpoint_path: path to the official XLM torch checkpoint.
        pytorch_dump_folder_path: output directory for the converted files.
    """
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path , map_location='''cpu''' )
    state_dict = chkpt['''model''']
    # We have the base model one level deeper than the original XLM repository
    # NOTE(review): the "transformer." prefix is reconstructed from that
    # comment — confirm against the original conversion script.
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v
    config = chkpt['''params''']
    # Drop tensor-valued entries; only JSON-serialisable params are kept.
    config = {n: v for n, v in config.items() if not isinstance(v , (torch.FloatTensor, numpy.ndarray) )}
    vocab = chkpt['''dico_word2id''']
    # Append the word-end marker except for special/BPE-continuation tokens.
    vocab = {s + '</w>' if s.find('''@@''' ) == -1 and i > 13 else s.replace('''@@''' , '''''' ): i for s, i in vocab.items()}
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['vocab_file']
    print(f'Save PyTorch model to {pytorch_weights_dump_path}' )
    torch.save(two_levels_state_dict , pytorch_weights_dump_path )
    print(f'Save configuration file to {pytorch_config_dump_path}' )
    with open(pytorch_config_dump_path , '''w''' , encoding='''utf-8''' ) as f:
        f.write(json.dumps(config , indent=2 ) + '''\n''' )
    # Fixed: the original printed the config path here instead of the vocab path.
    print(f'Save vocab file to {pytorch_vocab_dump_path}' )
    with open(pytorch_vocab_dump_path , '''w''' , encoding='''utf-8''' ) as f:
        f.write(json.dumps(vocab , indent=2 ) + '''\n''' )
if __name__ == "__main__":
    # NOTE(review): locals were mangled — `parser` / `args` are the intended
    # bindings for the `lowercase__` assignments, and the converter above is
    # defined as `UpperCamelCase`, not `convert_xlm_checkpoint_to_pytorch`.
    lowercase__ :Union[str, Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    lowercase__ :str = parser.parse_args()
    convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 704 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
    # Flax-only dependencies: keep them out of module scope when jax is absent.
    import os

    import jax.numpy as jnp
    from jax import jit

    from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
    from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model

    # Minimum jax version for the assumed test parallelism.
    lowercase__ :Optional[Any] = "0.12" # assumed parallelism: 8

if is_torch_available():
    import torch
def UpperCamelCase ( shape , vocab_size , rng=None ):
    """Create a random int32 token-id array of `shape` with values in [0, vocab_size).

    The mangled original declared duplicate parameter names (a SyntaxError);
    restored the `(shape, vocab_size, rng)` signature that its body reads.
    """
    if rng is None:
        rng = random.Random()
    total_dims = 1
    for dim in shape:
        total_dims *= dim
    values = []
    for _ in range(total_dims ):
        values.append(rng.randint(0 , vocab_size - 1 ) )
    # `jnp.intaa` was a mangled `jnp.int32` (digits rewritten to `a`).
    output = np.array(values , dtype=jnp.int32 ).reshape(shape )
    return output
def UpperCamelCase ( shape , rng=None ):
    """Random 0/1 attention mask; forces the last position of each row to 1.

    The mangled original declared duplicate parameter names (a SyntaxError)
    and assigned the forced value to a throwaway local instead of the mask.
    """
    # NOTE(review): `ids_tensor` is the helper defined just above in the
    # original file; its name was mangled here — confirm the binding.
    attn_mask = ids_tensor(shape , vocab_size=2 , rng=rng )
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class lowercase :
    # NOTE(review): identifiers in this mixin were mangled — every local is
    # rebound to `lowercase` while later lines read the intended names
    # (`config`, `inputs`, `input_ids`, `attention_mask`, `max_length`,
    # `model`, `generation_outputs`, ...), and every method is named `A__`.
    # Confirm against the original FlaxGenerationTesterMixin before use.
    # Concrete test classes are expected to provide a model tester and the
    # tuple of generative model classes via these class attributes.
    lowercase_ : Any =None
    lowercase_ : List[str] =()

    def A__ ( self):
        # Build (config, input_ids, attention_mask, max_length) for the tests.
        lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        # cut to half length & take max batch_size 3
        lowercase = 2
        lowercase = inputs['''input_ids'''].shape[-1] // 2
        lowercase = inputs['''input_ids'''][:max_batch_size, :sequence_length]
        lowercase = jnp.ones_like(A__)
        lowercase = attention_mask[:max_batch_size, :sequence_length]
        # generate max 5 tokens
        lowercase = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            lowercase = config.eos_token_id
        return config, input_ids, attention_mask, max_length

    @is_pt_flax_cross_test
    def A__ ( self):
        # Greedy generation must match between the Flax model and its PT port.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        lowercase = False
        lowercase = max_length
        lowercase = 0
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model_class.__name__[4:] # Skip the "Flax" at the beginning
            lowercase = getattr(A__ ,A__)
            lowercase = pt_model_class(A__).eval()
            lowercase = load_flax_weights_in_pytorch_model(A__ ,flax_model.params)
            lowercase = flax_model.generate(A__).sequences
            lowercase = pt_model.generate(torch.tensor(A__ ,dtype=torch.long))
            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                lowercase = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
            self.assertListEqual(pt_generation_outputs.numpy().tolist() ,flax_generation_outputs.tolist())

    def A__ ( self):
        # Greedy generation runs; jit-compiled generate must match eager.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        lowercase = False
        lowercase = max_length
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__).sequences
            self.assertEqual(generation_outputs.shape[-1] ,A__)
            lowercase = jit(model.generate)
            lowercase = jit_generate(A__).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())

    def A__ ( self):
        # Sampling path (do_sample=True); jit output must match eager.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        lowercase = True
        lowercase = max_length
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__).sequences
            self.assertEqual(generation_outputs.shape[-1] ,A__)
            lowercase = jit(model.generate)
            lowercase = jit_generate(A__).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())

    def A__ ( self):
        # Beam search (num_beams=2); jit output must match eager.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        lowercase = False
        lowercase = max_length
        lowercase = 2
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__).sequences
            self.assertEqual(generation_outputs.shape[-1] ,A__)
            lowercase = jit(model.generate)
            lowercase = jit_generate(A__).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())

    def A__ ( self):
        # num_return_sequences: the output batch must grow accordingly.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        lowercase = False
        lowercase = max_length
        lowercase = 2
        lowercase = 2
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__).sequences
            self.assertEqual(generation_outputs.shape[0] ,input_ids.shape[0] * config.num_return_sequences)

    def A__ ( self):
        # Sampling with temperature / top_k / top_p and forced token settings.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        lowercase = True
        lowercase = max_length
        lowercase = 0.8
        lowercase = 1_0
        lowercase = 0.3
        lowercase = 1
        lowercase = 8
        lowercase = 9
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__).sequences
            self.assertEqual(generation_outputs.shape[-1] ,A__)
            lowercase = jit(model.generate)
            lowercase = jit_generate(A__).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())

    def A__ ( self):
        # Greedy generation with forced bos/eos token ids.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        lowercase = max_length
        lowercase = 1
        lowercase = 8
        lowercase = 9
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__).sequences
            self.assertEqual(generation_outputs.shape[-1] ,A__)
            lowercase = jit(model.generate)
            lowercase = jit_generate(A__).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())

    def A__ ( self):
        # Beam search with forced bos/eos token ids.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        lowercase = max_length
        lowercase = 2
        lowercase = 1
        lowercase = 8
        lowercase = 9
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__).sequences
            self.assertEqual(generation_outputs.shape[-1] ,A__)
            lowercase = jit(model.generate)
            lowercase = jit_generate(A__).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())

    def A__ ( self):
        # Greedy generation with a left-padded attention mask.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        # pad attention mask on the left
        lowercase = attention_mask.at[(0, 0)].set(0)
        lowercase = False
        lowercase = max_length
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__ ,attention_mask=A__).sequences
            self.assertEqual(generation_outputs.shape[-1] ,A__)
            lowercase = jit(model.generate)
            lowercase = jit_generate(A__ ,attention_mask=A__).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())

    def A__ ( self):
        # Sampling with a left-padded attention mask.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        # pad attention mask on the left
        lowercase = attention_mask.at[(0, 0)].set(0)
        lowercase = True
        lowercase = max_length
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__ ,attention_mask=A__).sequences
            self.assertEqual(generation_outputs.shape[-1] ,A__)
            lowercase = jit(model.generate)
            lowercase = jit_generate(A__ ,attention_mask=A__).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())

    def A__ ( self):
        # Beam search with a left-padded attention mask.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        # pad attention mask on the left
        lowercase = attention_mask.at[(0, 0)].set(0)
        lowercase = 2
        lowercase = max_length
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__ ,attention_mask=A__).sequences
            self.assertEqual(generation_outputs.shape[-1] ,A__)
            lowercase = jit(model.generate)
            lowercase = jit_generate(A__ ,attention_mask=A__).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
@require_flax
class lowercase ( unittest.TestCase ):
    # Integration test: `generate` must reject typo'd / unknown kwargs.
    def A__ ( self):
        # NOTE(review): locals were mangled — `tokenizer`, `model`, `input_ids`
        # and the expected exception type passed to `assertRaisesRegex` are
        # hidden behind `lowercase` / `A__`; confirm against the original test.
        lowercase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-bert''')
        lowercase = FlaxAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''')
        lowercase = '''Hello world'''
        lowercase = tokenizer(A__ ,return_tensors='''np''').input_ids
        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(A__ ,'''do_samples'''):
            model.generate(A__ ,do_samples=A__)
        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(A__ ,'''foo'''):
            lowercase = {'''foo''': '''bar'''}
            model.generate(A__ ,**A__)
| 633 | 0 |
from collections import defaultdict
from math import ceil, sqrt
def UpperCamelCase ( t_limit = 100_0000 , n_limit = 10 ):
    """Project Euler 174: count lamina tile-counts t <= t_limit achievable in
    1..n_limit distinct ways.

    A square lamina uses t = outer^2 - hole^2 tiles (same-parity widths).
    The mangled original declared two identically-named parameters (a
    SyntaxError), built `defaultdict` from an undefined `_A`, and hard-coded
    the upper bound 10 instead of using `n_limit` — all restored/generalised
    (default behaviour unchanged).
    """
    count = defaultdict(int )
    for outer_width in range(3 , (t_limit // 4) + 2 ):
        if outer_width * outer_width > t_limit:
            # Smallest hole that keeps the tile count within t_limit.
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        # Hole width must have the same parity as the outer width.
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound , outer_width - 1 , 2 ):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit )
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined under that name in the mangled
    # file (the function above is `UpperCamelCase`) — confirm before running.
    print(F'{solution() = }')
| 705 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowercase ( SCREAMING_SNAKE_CASE__ ):
    # NOTE(review): the base class name was mangled — `SCREAMING_SNAKE_CASE__`
    # is undefined here; the import above suggests `SchedulerCommonTest`.
    # Locals are likewise all rebound to `lowercase` while later lines read
    # the intended names (`config`, `scheduler`, `new_scheduler`, `output`,
    # `sample`, `result_mean`, ...), and every method is named `A__`.
    # Confirm against the original UniPCMultistepScheduler test before use.
    # Scheduler classes under test and default forward kwargs.
    lowercase_ : List[str] =(UniPCMultistepScheduler,)
    lowercase_ : Tuple =(('''num_inference_steps''', 25),)

    def A__ ( self ,**A__):
        # Default scheduler config, overridable through **kwargs.
        lowercase = {
            '''num_train_timesteps''': 1_0_0_0,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''solver_order''': 2,
            '''solver_type''': '''bh2''',
        }
        config.update(**A__)
        return config

    def A__ ( self ,A__=0 ,**A__):
        # Save/reload round-trip: a reloaded scheduler must step identically.
        lowercase = dict(self.forward_default_kwargs)
        lowercase = kwargs.pop('''num_inference_steps''' ,A__)
        lowercase = self.dummy_sample
        lowercase = 0.1 * sample
        lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            lowercase = self.get_scheduler_config(**A__)
            lowercase = scheduler_class(**A__)
            scheduler.set_timesteps(A__)
            # copy over dummy past residuals
            lowercase = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(A__)
                lowercase = scheduler_class.from_pretrained(A__)
                new_scheduler.set_timesteps(A__)
                # copy over dummy past residuals
                lowercase = dummy_past_residuals[: new_scheduler.config.solver_order]
            lowercase , lowercase = sample, sample
            for t in range(A__ ,time_step + scheduler.config.solver_order + 1):
                lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
                lowercase = new_scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
                assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"

    def A__ ( self ,A__=0 ,**A__):
        # Same round-trip check, with residuals copied after timestep setup.
        lowercase = dict(self.forward_default_kwargs)
        lowercase = kwargs.pop('''num_inference_steps''' ,A__)
        lowercase = self.dummy_sample
        lowercase = 0.1 * sample
        lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            lowercase = self.get_scheduler_config()
            lowercase = scheduler_class(**A__)
            scheduler.set_timesteps(A__)
            # copy over dummy past residuals (must be after setting timesteps)
            lowercase = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(A__)
                lowercase = scheduler_class.from_pretrained(A__)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(A__)
                # copy over dummy past residual (must be after setting timesteps)
                lowercase = dummy_past_residuals[: new_scheduler.config.solver_order]
            lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
            lowercase = new_scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"

    def A__ ( self ,A__=None ,**A__):
        # Run a full 10-step denoising loop and return the final sample.
        if scheduler is None:
            lowercase = self.scheduler_classes[0]
            lowercase = self.get_scheduler_config(**A__)
            lowercase = scheduler_class(**A__)
        lowercase = self.scheduler_classes[0]
        lowercase = self.get_scheduler_config(**A__)
        lowercase = scheduler_class(**A__)
        lowercase = 1_0
        lowercase = self.dummy_model()
        lowercase = self.dummy_sample_deter
        scheduler.set_timesteps(A__)
        for i, t in enumerate(scheduler.timesteps):
            lowercase = model(A__ ,A__)
            lowercase = scheduler.step(A__ ,A__ ,A__).prev_sample
        return sample

    def A__ ( self):
        # Two consecutive steps must produce same-shaped outputs.
        lowercase = dict(self.forward_default_kwargs)
        lowercase = kwargs.pop('''num_inference_steps''' ,A__)
        for scheduler_class in self.scheduler_classes:
            lowercase = self.get_scheduler_config()
            lowercase = scheduler_class(**A__)
            lowercase = self.dummy_sample
            lowercase = 0.1 * sample
            if num_inference_steps is not None and hasattr(A__ ,'''set_timesteps'''):
                scheduler.set_timesteps(A__)
            elif num_inference_steps is not None and not hasattr(A__ ,'''set_timesteps'''):
                lowercase = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
            lowercase = dummy_past_residuals[: scheduler.config.solver_order]
            lowercase = scheduler.timesteps[5]
            lowercase = scheduler.timesteps[6]
            lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
            lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
            self.assertEqual(output_a.shape ,sample.shape)
            self.assertEqual(output_a.shape ,output_a.shape)

    def A__ ( self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        lowercase = UniPCMultistepScheduler(**self.get_scheduler_config())
        lowercase = self.full_loop(scheduler=A__)
        lowercase = torch.mean(torch.abs(A__))
        assert abs(result_mean.item() - 0.2464) < 1E-3
        lowercase = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        lowercase = DEISMultistepScheduler.from_config(scheduler.config)
        lowercase = DPMSolverMultistepScheduler.from_config(scheduler.config)
        lowercase = UniPCMultistepScheduler.from_config(scheduler.config)
        lowercase = self.full_loop(scheduler=A__)
        lowercase = torch.mean(torch.abs(A__))
        assert abs(result_mean.item() - 0.2464) < 1E-3

    def A__ ( self):
        # Config sweep over the number of training timesteps.
        for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=A__)

    def A__ ( self):
        # Config sweep over thresholding / solver / prediction settings.
        self.check_over_configs(thresholding=A__)
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=A__ ,prediction_type=A__ ,sample_max_value=A__ ,solver_order=A__ ,solver_type=A__ ,)

    def A__ ( self):
        # Config sweep over prediction types.
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=A__)

    def A__ ( self):
        # Config sweep over solver types/orders; full loop must stay finite.
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=A__ ,solver_type=A__ ,prediction_type=A__ ,)
                    lowercase = self.full_loop(
                        solver_order=A__ ,solver_type=A__ ,prediction_type=A__ ,)
                    assert not torch.isnan(A__).any(), "Samples have nan numbers"

    def A__ ( self):
        # lower_order_final on/off both supported.
        self.check_over_configs(lower_order_final=A__)
        self.check_over_configs(lower_order_final=A__)

    def A__ ( self):
        # Forward sweep over inference step counts.
        for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
            self.check_over_forward(num_inference_steps=A__ ,time_step=0)

    def A__ ( self):
        # Regression value for the default (epsilon) full loop.
        lowercase = self.full_loop()
        lowercase = torch.mean(torch.abs(A__))
        assert abs(result_mean.item() - 0.2464) < 1E-3

    def A__ ( self):
        # Regression value for the v_prediction full loop.
        lowercase = self.full_loop(prediction_type='''v_prediction''')
        lowercase = torch.mean(torch.abs(A__))
        assert abs(result_mean.item() - 0.1014) < 1E-3

    def A__ ( self):
        # fp16 inputs must stay fp16 through the full loop.
        lowercase = self.scheduler_classes[0]
        lowercase = self.get_scheduler_config(thresholding=A__ ,dynamic_thresholding_ratio=0)
        lowercase = scheduler_class(**A__)
        lowercase = 1_0
        lowercase = self.dummy_model()
        lowercase = self.dummy_sample_deter.half()
        scheduler.set_timesteps(A__)
        for i, t in enumerate(scheduler.timesteps):
            lowercase = model(A__ ,A__)
            lowercase = scheduler.step(A__ ,A__ ,A__).prev_sample
        assert sample.dtype == torch.floataa

    def A__ ( self ,**A__):
        # Using every training timestep must yield unique inference timesteps.
        for scheduler_class in self.scheduler_classes:
            lowercase = self.get_scheduler_config(**A__)
            lowercase = scheduler_class(**A__)
            scheduler.set_timesteps(scheduler.config.num_train_timesteps)
            assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
| 633 | 0 |
class RadixNode:
    """A node of a radix (compressed prefix) tree.

    Each child edge is labelled by a string ``prefix``; a node marks the end
    of a stored word when ``is_leaf`` is True.  The functions below
    (``insert_many``/``find``/``delete``/``print_tree``) match the call sites
    later in this file.
    """

    def __init__(self, prefix="", is_leaf=False):
        # Mapping from the first character of a child's prefix to the child node.
        self.nodes = {}
        # A node is a leaf if the tree contains its word.
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word):
        """Return (common prefix, remaining node prefix, remaining word)."""
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words):
        """Insert every word of `words` into the tree."""
        for word in words:
            self.insert(word)

    def insert(self, word):
        """Insert `word` into the tree."""
        # Case 1: the word equals this node's prefix -> mark it a terminator.
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: no edge starts with the word's first character -> new edge.
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # Case 3: the child's whole prefix matches -> recurse with the rest.
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)
            # Case 4: partial match -> split the edge at the common prefix and
            # hang both remainders under a new intermediate node.
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = RadixNode(prefix=matching_string)
                aux_node.nodes[remaining_prefix[0]] = incoming_node
                self.nodes[matching_string[0]] = aux_node
                if remaining_word == "":
                    aux_node.is_leaf = True
                else:
                    aux_node.insert(remaining_word)

    def find(self, word):
        """Return True iff `word` is stored in the tree."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
        # If part of the edge label is left over, the word cannot be present.
        if remaining_prefix != "":
            return False
        # Word fully consumed: present only if the node is a terminator.
        if remaining_word == "":
            return incoming_node.is_leaf
        # Otherwise keep searching below.
        return incoming_node.find(remaining_word)

    def delete(self, word):
        """Remove `word` from the tree; return True iff it was present."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
        if remaining_prefix != "":
            return False
        if remaining_word != "":
            return incoming_node.delete(remaining_word)
        if not incoming_node.is_leaf:
            # Not a stored word, nothing to delete.
            return False
        # The word ends exactly at `incoming_node`.
        if len(incoming_node.nodes) == 0:
            # No children: drop the edge entirely.
            del self.nodes[word[0]]
            # If this node is now a pass-through with a single child, merge them.
            if len(self.nodes) == 1 and not self.is_leaf:
                merging_node = next(iter(self.nodes.values()))
                self.is_leaf = merging_node.is_leaf
                self.prefix += merging_node.prefix
                self.nodes = merging_node.nodes
        elif len(incoming_node.nodes) > 1:
            # Several children: just clear the terminator flag.
            incoming_node.is_leaf = False
        else:
            # Exactly one child: merge it into the node.
            merging_node = next(iter(incoming_node.nodes.values()))
            incoming_node.is_leaf = merging_node.is_leaf
            incoming_node.prefix += merging_node.prefix
            incoming_node.nodes = merging_node.nodes
        return True

    def print_tree(self, height=0):
        """Pretty-print the tree, one edge label per line."""
        if self.prefix != "":
            print("-" * height, self.prefix, " (leaf)" if self.is_leaf else "")
        for value in self.nodes.values():
            value.print_tree(height + 1)
def test_trie():
    """Smoke-test insert/find/delete on a small word set; return True on success."""
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)
    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True
def pytests():
    """Entry point for test runners; delegates to test_trie()."""
    assert test_trie()
def main():
    """Build a demo radix tree and print it."""
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)
    print("Words:", words)
    print("Tree:")
    root.print_tree()
if __name__ == "__main__":
    # Script entry point: build and print the demo radix tree.
    # NOTE(review): `main` is not defined under that name in this file as
    # written — the demo function above appears to have lost its name; confirm.
    main()
| 706 |
import argparse
from collections import defaultdict
import yaml
lowercase__ :Optional[int] = "docs/source/en/_toctree.yml"
def clean_model_doc_toc(model_doc):
    """Clean one model-doc section of the table of content.

    Removes duplicate entries (raising ValueError when duplicates disagree on
    their title) and returns the entries sorted alphabetically by title.
    """
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add non-duplicate keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])
    # Sort (case-insensitively) on title
    return sorted(new_doc, key=lambda s: s["title"].lower())
def check_model_doc(overwrite=False):
    """Check (and optionally fix) the Models section of the doc table of content.

    Raises ValueError when the ToC is not clean and `overwrite` is False.
    """
    # `lowercase__` holds the path to _toctree.yml (module-level constant above).
    with open(lowercase__, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    # Clean each modality sub-section independently.
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(lowercase__, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
    # CLI: check the ToC, optionally rewriting it in place.
    # (The previous `:Any`/`:int` annotations referenced undefined names and
    # the results were bound to `lowercase__` while the code read `parser`/`args`.)
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
    check_model_doc(args.fix_and_overwrite)
| 633 | 0 |
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
# Container returned by the dataset loader; referenced as `_Datasets` below
# (the previous code bound both constants to `lowercase__`, leaving
# `_Datasets` and `DEFAULT_SOURCE_URL` undefined at their use sites).
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _readaa(bytestream):
    """Read a big-endian uint32 from `bytestream` (historically `_read32`).

    (`numpy.uintaa` does not exist — the dtype is uint32; the def is renamed
    to match its call sites in the extract helpers below.)
    """
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract MNIST images into a 4-D uint8 array [index, y, x, depth].

    Raises ValueError when the file's magic number is not 2051.
    """
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _readaa(bytestream)
        if magic != 2051:
            raise ValueError("Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _readaa(bytestream)
        rows = _readaa(bytestream)
        cols = _readaa(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot row vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    # Set one entry per row via the flattened view.
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract MNIST labels into a 1-D uint8 array (one-hot if requested).

    Raises ValueError when the file's magic number is not 2049.
    """
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _readaa(bytestream)
        if magic != 2049:
            raise ValueError("Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _readaa(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    """In-memory MNIST data set with epoch-aware mini-batch iteration.

    (Reconstructed: the previous code named the class `lowercase`, named every
    method `A__` so later defs shadowed earlier ones, and lost every
    `self._*` assignment; call sites below reference `_DataSet`.)
    """

    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(self, images, labels, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True, seed=None):
        """Construct a _DataSet.

        `dtype` can be uint8 (values stay in [0, 255]) or float32 (rescaled
        into [0.0, 1.0]); `seed` fixes the shuffling order.
        """
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download `filename` from `source_url` into `work_directory` unless it exists.

    Returns the local file path in either case.
    """
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath
@deprecated(None, "Please use alternatives such as: tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    """Download (if needed) and load the MNIST train/validation/test splits.

    (The previous def repeated the same parameter name eight times, which is a
    SyntaxError; parameters are restored per the call patterns in the body.)
    """
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}
    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)
    return _Datasets(train=train, validation=validation, test=test)
| 707 |
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func, a, precision=10**-10):
    """Find a root of `func` via Newton-Raphson iteration.

    `func` is a Python expression in the variable ``x`` (e.g. "sin(x)"); `a`
    is the initial guess.  NOTE: `eval` on the expression is acceptable for
    this demo only — never feed it untrusted input.
    """
    x = a
    while True:
        # x_{n+1} = x_n - f(x_n) / f'(x_n); sympy.diff supplies f'.
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
# Let's Execute
if __name__ == "__main__":
    # Demo driver for the Newton-Raphson solver.
    # NOTE(review): `newton_raphson` is not defined under that name in this
    # file as written — the solver above appears to have lost its name; confirm.
    # Find root of trigonometric function
    # Find value of pi
    print(F'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
    # Find root of polynomial
    print(F'The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}')
    # Find Square Root of 5
    print(F'The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}')
    # Exponential Roots
    print(F'The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}')
| 633 | 0 |
'''simple docstring'''
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    """Tool answering a question about a document image with a Donut
    (VisionEncoderDecoder) checkpoint fine-tuned on DocVQA.

    (Reconstructed: the base class `__A` was undefined — `PipelineTool` is
    imported above — and every attribute/method shared one name, shadowing
    each other.)
    """

    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel
    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)

    def encode(self, document, question):
        # Donut DocVQA task prompt with the user question spliced in.
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        # `tokenajson` was a mangled `token2json` (Donut's JSON post-processor).
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
| 708 |
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb):
    """Convert an RGB image array (H, W, 3) to grayscale (ITU-R 601-2 luma).

    (The previous code unpacked the channels into discarded names and then
    read undefined `r`, `g`, `b`; the def is renamed to match its call site.)
    """
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b
def gray_to_binary(gray):
    """Threshold a grayscale image to a boolean mask: True for 127 < v <= 255."""
    return (gray > 127) & (gray <= 255)
def dilation(image, kernel):
    """Morphological dilation of a binary `image` by `kernel`.

    A pixel is set when any kernel-weighted neighbour is set.
    (Restored the lost `image_padded[...] = image` and `output[y, x] = ...`
    assignments; the def is renamed to match its call site.)
    """
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )
    # Copy image to padded image (centred for an odd-sized kernel).
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output
if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
| 633 | 0 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph,
    v,
    visited_forward,
    visited_backward,
    cst_fwd,
    cst_bwd,
    queue,
    parent,
    shortest_distance,
):
    """One relaxation sweep from vertex `v` for bidirectional Dijkstra.

    Updates `cst_fwd`, `parent` and `queue` in place and returns the (possibly
    improved) `shortest_distance` via vertices already settled on the opposite
    frontier.  (The previous def repeated one parameter name nine times, a
    SyntaxError; parameters restored per the call pattern below.)
    """
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        # If the opposite search already settled `nxt`, try to improve the
        # best known path through the edge (v, nxt).
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij(source, destination, graph_forward, graph_backward):
    """Bidirectional Dijkstra; return the shortest distance or -1 if unreachable."""
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward = PriorityQueue()
    queue_backward = PriorityQueue()
    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))
    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)
        # Relax forward from v_fwd and backward from v_bwd.
        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward,
            cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward,
            cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance,
        )
        # Stop once the two frontiers have met.
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
# Demo adjacency lists (the previous code bound both dicts to the same name
# and used undefined `List`/`Dict` annotations, raising NameError at import).
graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 709 |
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping keys (in place) that have no Transformers equivalent."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        # Missing keys are fine: pop with a None default.
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    """Build a bias-free Linear layer sharing the embedding's weight tensor.

    Used to rebuild the tied `lm_head`.  (The previous code passed the
    embedding module itself as the layer sizes and bias flag.)
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mamaaa_checkpoint_from_disk(checkpoint_path):
    """Load a fairseq M2M100 checkpoint and convert it to a Transformers model.

    NOTE: `torch.load` unpickles the file — only run this on checkpoints you trust.
    """
    mam_aaa = torch.load(checkpoint_path, map_location="cpu")
    args = mam_aaa["args"] or mam_aaa["cfg"]["model"]
    state_dict = mam_aaa["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    config = MaMaaaConfig(
        vocab_size=vocab_size,
        max_position_embeddings=1024,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
    )

    # Tie the shared embedding to the decoder token embedding.
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MaMaaaForConditionalGeneration(config)
    # strict=False: the checkpoint lacks keys (e.g. lm_head) rebuilt below.
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    # (Fixed the mangled attribute `args.fairseq_pathß` and the unbound
    # `parser`/`args`/`model` names.)
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 633 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure (the previous code bound it to `lowercase__` while the
# _LazyModule call below read the undefined `_import_structure`, and never
# attached the torch-only modeling symbols).
_import_structure = {
    "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pegasus_x"] = [
        "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PegasusXForConditionalGeneration",
        "PegasusXModel",
        "PegasusXPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pegasus_x import (
            PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
            PegasusXForConditionalGeneration,
            PegasusXModel,
            PegasusXPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 710 |
from __future__ import annotations
from random import random
class Node:
    """Treap node: binary-search-tree by `value`, heap by the random `prior`ity.

    (Renamed from `lowercase` to match the `Node(...)` call in insert(); the
    lost `self.*` assignments are restored.)
    """

    def __init__(self, value=None):
        self.value = value
        self.prior = random()  # heap priority drawn uniformly from [0, 1)
        self.left = None
        self.right = None

    def __repr__(self):
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        return pformat({f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1)

    def __str__(self):
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right
def split(root, value):
    """Split treap `root` into (values <= value, values > value) subtrees."""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    if root.value is None:
        return None, None
    if value < root.value:
        # The split point lies in the left subtree; root stays on the right.
        left, root.left = split(root.left, value)
        return left, root
    # The split point lies in the right subtree; root stays on the left.
    root.right, right = split(root.right, value)
    return root, right
def merge(left, right):
    """Merge two treaps where all of `left`'s values precede all of `right`'s."""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    if left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    right.left = merge(left, right.left)
    return right
def insert(root, value):
    """Insert `value` into treap `root`; return the new root."""
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)
def erase(root, value):
    """Remove all nodes equal to `value` from the treap; return the new root."""
    # Cut out the slice (value-1, value] and rejoin the outer parts.
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)
def inorder(root):
    """Print the treap's values in sorted order, comma-separated."""
    if not root:  # None
        return
    inorder(root.left)
    print(root.value, end=",")
    inorder(root.right)
def interact_treap(root, args):
    """Apply space-separated commands ("+N" insert, "-N" erase) to the treap."""
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root
def main():
    """Interactive REPL: read command lines and mutate the treap until 'q'."""
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("good by!")
if __name__ == "__main__":
    import doctest

    # Run the module doctests, then start the interactive treap demo.
    # NOTE(review): `main` is not defined under that name in this file as
    # written — the REPL function above appears to have lost its name; confirm.
    doctest.testmod()
    main()
| 633 | 0 |
from math import log2


def UpperCamelCase(lowerCAmelCase__):
    """Return the 0-based index of the rightmost set bit of a non-negative int.

    Returns 0 for input 0.  Raises TypeError for non-int input and ValueError
    for negative input.  (`math.loga` does not exist — the import was a
    mangled `log2`; the body also read an undefined name `a`.)
    """
    if not isinstance(lowerCAmelCase__, int):
        raise TypeError("Input value must be a 'int' type")
    if lowerCAmelCase__ < 0:
        raise ValueError("Input value must be a positive integer")
    # n & -n isolates the lowest set bit; log2 of that power of two is its index.
    return 0 if lowerCAmelCase__ == 0 else int(log2(lowerCAmelCase__ & -lowerCAmelCase__))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 711 |
def solution(n=1000):
    """For a + b + c = n with a² + b² = c², return the largest product a*b*c
    (Project Euler style); -1 if no Pythagorean triplet sums to n.

    (The previous def's body read an undefined `n` instead of its parameter,
    and the __main__ block calls `solution`.)
    """
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
if __name__ == "__main__":
    # Prints the maximised product for the default target sum of 1000.
    # NOTE(review): `solution` is not defined under that name in this file as
    # written — the function above appears to have lost its name; confirm.
    print(F'{solution() = }')
| 633 | 0 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
# Dataset locations for the flip-augmentation script.  The previous code bound
# all four constants to `lowercase__` (shadowing each other, with undefined
# `Optional`/`Tuple` annotations) while the code below reads OUTPUT_DIR etc.
LABEL_DIR = ""   # directory with YOLO .txt label files
IMAGE_DIR = ""   # directory with the matching .jpg images
OUTPUT_DIR = ""  # where flipped copies are written
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)
def main():
    """Flip every dataset image (with its YOLO boxes) and write the results."""
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cva.imwrite(f"/{file_root}.jpg", image, [cva.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index+1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir, img_dir):
    """Read YOLO label files and pair each with its image path.

    Returns (img_paths, labels) where labels[i] is a list of
    [class, x_center, y_center, width, height] rows.  Label files with no
    boxes are skipped.  (The previous def repeated one parameter name, a
    SyntaxError; renamed to match its call sites.)
    """
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(img_list, anno_list, flip_type=1):
    """Flip every image and mirror its YOLO boxes accordingly.

    flip_type: 1 = horizontal (x centers mirrored), 0 = vertical (y centers).
    Returns (new images, new per-image box lists, original paths).
    """
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cva.imread(path)
        if flip_type == 1:
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
def random_chars(number_char=32):
    """Return a random lowercase-alphanumeric string of length `number_char` (> 1)."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
    # Flip the whole dataset, then report completion.
    # NOTE(review): `main` is not defined under that name in this file as
    # written — the driver above appears to have lost its name; confirm.
    main()
    print("DONE ✅")
| 712 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import structure: maps each submodule to its public names so the heavy
# (torch-dependent) modules are only imported on first attribute access.
_import_structure = {
    "configuration_instructblip": [
        "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InstructBlipConfig",
        "InstructBlipQFormerConfig",
        "InstructBlipVisionConfig",
    ],
    "processing_instructblip": ["InstructBlipProcessor"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: expose only the config/processing symbols.
    pass
else:
    # Previously this list was bound to a throwaway name and never reached the
    # lazy module; register it under its submodule key instead.
    _import_structure["modeling_instructblip"] = [
        "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InstructBlipQFormerModel",
        "InstructBlipPreTrainedModel",
        "InstructBlipForConditionalGeneration",
        "InstructBlipVisionModel",
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )
else:
    import sys

    # Replace this module in sys.modules with the lazy proxy (the standard
    # transformers pattern); `_import_structure` was previously an unresolved
    # name here, crashing the import.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 633 | 0 |
def UpperCamelCase(number):
    """Return True if `number` is an automorphic number.

    An automorphic number's square ends in the number itself (5 -> 25,
    25 -> 625, ...). Negative numbers return False; non-int input raises
    TypeError.
    """
    if not isinstance(number, int):
        msg = f'Input value of [number={number}] must be an integer'
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    # Compare digits from least significant upward until the number is consumed.
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
# Run the embedded doctests when this module is executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 713 |
from numpy import exp, pi, sqrt
def UpperCamelCase(x, mu=0.0, sigma=1.0):
    """Return the value of the Gaussian (normal) probability density at `x`.

    Args:
        x: point (or numpy array) at which to evaluate the density.
        mu: mean of the distribution.
        sigma: standard deviation (must be non-zero).
    """
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
# Run the embedded doctests when this module is executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 633 | 0 |
def UpperCamelCase(start, finish):
    """Greedy activity selection: print a maximal set of non-overlapping activities.

    Args:
        start: start times, one per activity.
        finish: finish times, one per activity; assumed sorted ascending
            (the precondition of the classic greedy algorithm).

    Prints the chosen activity indices, comma-separated.
    """
    n = len(finish)
    print('''The following activities are selected:''')
    # The first activity is always selected
    i = 0
    print(i, end=''',''')
    # Select any activity starting at or after the finish of the last chosen one.
    for j in range(n):
        if start[j] >= finish[i]:
            print(j, end=''',''')
            i = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    UpperCamelCase(start, finish)
| 714 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def UpperCamelCase(key, default=False):
    """Return the boolean value of environment variable `key`.

    Falls back to `default` when the variable is unset; raises ValueError when
    it is set to something `strtobool` cannot parse.
    """
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f'If set, {key} must be yes or no.')
    return _value


# Module-level flag read by the `slow` skip decorator below; it was previously
# bound to a throwaway name, leaving `_run_slow_tests` undefined.
_run_slow_tests = UpperCamelCase("RUN_SLOW", default=False)
# NOTE(review): every helper below is named `UpperCamelCase`, so each `def`
# shadows the previous one and only the last binding survives at runtime.
# The docstrings record each wrapper's evident intent from its skip message.
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Decorator: unconditionally skip the wrapped test.'''
    return unittest.skip('''Test was skipped''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Decorator: run the test only when slow tests are enabled.

    NOTE(review): reads `_run_slow_tests`, which is not bound in this file as
    shown (the RUN_SLOW flag above is assigned to a different name) — confirm.
    '''
    return unittest.skipUnless(_run_slow_tests , '''test is slow''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Decorator: run the test only when no CUDA GPU is available.'''
    return unittest.skipUnless(not torch.cuda.is_available() , '''test requires only a CPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Decorator: run the test only when a CUDA GPU is available.'''
    return unittest.skipUnless(torch.cuda.is_available() , '''test requires a GPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Decorator: run the test only when an XPU device is available.'''
    return unittest.skipUnless(is_xpu_available() , '''test requires a XPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Decorator: run the test only when torch has an `mps` backend.'''
    return unittest.skipUnless(is_mps_available() , '''test requires a `mps` backend support in `torch`''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Decorator: run the test only when transformers AND datasets are installed.'''
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available() , '''test requires the Hugging Face suite''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Decorator: run the test only when bitsandbytes is installed.'''
    return unittest.skipUnless(is_bnb_available() , '''test requires the bitsandbytes library''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Decorator: run the test only when a TPU is available.'''
    return unittest.skipUnless(is_tpu_available() , '''test requires TPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Decorator: run the test only with exactly one CUDA GPU.'''
    return unittest.skipUnless(torch.cuda.device_count() == 1 , '''test requires a GPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Decorator: run the test only with exactly one XPU.'''
    return unittest.skipUnless(torch.xpu.device_count() == 1 , '''test requires a XPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Decorator: run the test only with more than one CUDA GPU.'''
    return unittest.skipUnless(torch.cuda.device_count() > 1 , '''test requires multiple GPUs''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Decorator: run the test only with more than one XPU.'''
    return unittest.skipUnless(torch.xpu.device_count() > 1 , '''test requires multiple XPUs''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Decorator: run the test only when safetensors is installed.'''
    return unittest.skipUnless(is_safetensors_available() , '''test requires safetensors''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Decorator: run the test only when DeepSpeed is installed.'''
    return unittest.skipUnless(is_deepspeed_available() , '''test requires DeepSpeed''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Decorator: run the test only on torch >= 1.12.0.'''
    return unittest.skipUnless(is_torch_version('''>=''' , '''1.12.0''' ) , '''test requires torch version >= 1.12.0''' )(lowerCAmelCase__ )
def UpperCamelCase(test_case=None, version=None):
    """Decorator marking a test that requires torch >= `version`.

    Usable both bare (`@decorator`) and parametrized (`@decorator(version=...)`):
    when called without a test case it returns a `partial` of itself awaiting one.
    """
    if test_case is None:
        return partial(UpperCamelCase, version=version)
    return unittest.skipUnless(is_torch_version('''>=''', version), f'test requires torch version >= {version}')(test_case)
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Decorator: run the test only when TensorBoard is installed.'''
    return unittest.skipUnless(is_tensorboard_available() , '''test requires Tensorboard''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Decorator: run the test only when wandb is installed.'''
    return unittest.skipUnless(is_wandb_available() , '''test requires wandb''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Decorator: run the test only when comet_ml is installed.'''
    return unittest.skipUnless(is_comet_ml_available() , '''test requires comet_ml''' )(lowerCAmelCase__ )
# True when at least one of wandb/tensorboard is installed and comet_ml is not.
# Previously bound to a throwaway name, leaving `_atleast_one_tracker_available`
# (read by the decorator below) undefined.
_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def UpperCamelCase(test_case):
    """Decorator: skip unless a tracker is available and comet_ml is absent."""
    return unittest.skipUnless(
        _atleast_one_tracker_available, '''test requires at least one tracker to be available and for `comet_ml` to not be installed''', )(test_case)
class lowercase(unittest.TestCase):
    """TestCase managing a temp dir shared by all tests of the class.

    The original three methods were all named ``A__`` (shadowing each other)
    and bound ``tempfile.mkdtemp()`` to a local, so ``cls.tmpdir`` was never
    set; restored as the standard unittest class fixtures.
    """

    # When True, setUp empties the shared temp dir before each test.
    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        """Create the temp dir shared by every test in the class."""
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        """Remove the shared temp dir after all tests ran."""
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        """Optionally wipe the temp dir's contents before each test."""
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob('''**/*'''):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)
class lowercase ( unittest.TestCase ):
  # NOTE(review): in upstream accelerate this hook is named `tearDown`; named
  # `A__` here, unittest will never invoke it automatically — confirm intent.
  def A__ ( self):
    """Reset the AcceleratorState/PartialState singletons after a test."""
    super().tearDown()
    # Reset the state of the AcceleratorState singleton.
    AcceleratorState._reset_state()
    PartialState._reset_state()
class lowercase(unittest.TestCase):
    """TestCase that starts a set of mocks and stops them on cleanup."""

    def A__(self, A__):
        """Start each given mock and register its `stop` as a cleanup.

        Accepts a single mock or a tuple/list of mocks. The original bound the
        normalized list to a throwaway local, so `self.mocks` was never set and
        the loop raised AttributeError.
        """
        self.mocks = A__ if isinstance(A__, (tuple, list)) else [A__]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def UpperCamelCase(tensor):
    """Return True if `tensor` holds identical values on every distributed process.

    Gathers a copy of the tensor from all processes (via accelerate's `gather`)
    and compares each gathered slice against the local value.
    """
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class lowercase:
    """Plain record of a finished subprocess: return code plus captured streams.

    The original `__init__` declared three parameters all named ``A__`` (a
    SyntaxError) and bound the values to a throwaway local instead of `self`.
    """

    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
while True:
lowercase = await stream.readline()
if line:
callback(lowerCAmelCase__ )
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False):
    """Run `cmd` asynchronously, teeing stdout/stderr line by line.

    Returns a `_RunOutput(returncode, stdout_lines, stderr_lines)`.
    Renamed from ``UpperCamelCase`` to match the call site in
    `execute_subprocess_async` below; the duplicated keyword parameters were a
    SyntaxError.
    """
    if echo:
        print('''\nRunning: ''', ''' '''.join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env, )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        # Decode, record, and (unless quiet) echo one line to the given pipe.
        line = line.decode('''utf-8''').rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label='''stdout:'''))),
            asyncio.create_task(_read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label='''stderr:'''))),
        ], timeout=timeout, )
    # NOTE(review): `_RunOutput` is the result record defined above in this file.
    return _RunOutput(await p.wait(), out, err)
def UpperCamelCase(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True):
    """Synchronously run `cmd` through `_stream_subprocess`.

    Raises RuntimeError with the combined worker stderr on a non-zero exit;
    returns the `_RunOutput` otherwise. The duplicated keyword parameters in
    the original signature were a SyntaxError.
    """
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))
    cmd_str = ''' '''.join(cmd)
    if result.returncode > 0:
        stderr = '''\n'''.join(result.stderr)
        raise RuntimeError(
            f'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
            f'The combined stderr from workers follows:\n{stderr}')
    return result
class lowercase(Exception):
    """Raised by the subprocess helper below when a command exits non-zero.

    NOTE(review): the original base was the undefined name
    ``SCREAMING_SNAKE_CASE__``; `Exception` is the only base consistent with
    the ``raise ... from e`` at the call site.
    """
    pass
def UpperCamelCase(command, return_stdout=False):
    """Run `command`, optionally returning its decoded stdout.

    On failure, wraps the CalledProcessError in `SubprocessCallException`
    (defined above in this file) with the captured output attached.
    The duplicated parameters in the original signature were a SyntaxError.
    """
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, '''decode'''):
                output = output.decode('''utf-8''')
        return output
    except subprocess.CalledProcessError as e:
        # NOTE(review): `SubprocessCallException` must be the exception class
        # defined earlier in this module.
        raise SubprocessCallException(
            f'Command `{" ".join(command)}` failed with the following error:\n\n{e.output.decode()}') from e
| 633 | 0 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
# Shared SentencePiece fixture used to build the test tokenizer.
lowercase__ :Any = get_tests_dir("fixtures/test_sentencepiece.model")
# NOTE(review): this is the XLM-RoBERTa tokenizer test suite. The first base
# class is the undefined name `__lowerCAmelCase` (upstream: TokenizerTesterMixin)
# and every test method is named `A__`, so each definition shadows the previous
# one; the placeholder `_UpperCamelCase` arguments are likewise unresolved.
# Code left byte-identical — confirm against the upstream transformers test.
@require_sentencepiece
@require_tokenizers
class lowercase ( __lowerCAmelCase , unittest.TestCase ):
  """Tokenization tests for XLMRobertaTokenizer / XLMRobertaTokenizerFast."""
  lowercase_ : Any =XLMRobertaTokenizer
  lowercase_ : Optional[int] =XLMRobertaTokenizerFast
  lowercase_ : Optional[int] =True
  lowercase_ : List[Any] =True
  # Build a slow tokenizer from the SentencePiece fixture and save it.
  def A__ ( self):
    super().setUp()
    # We have a SentencePiece fixture for testing
    lowercase = XLMRobertaTokenizer(_UpperCamelCase ,keep_accents=_UpperCamelCase)
    tokenizer.save_pretrained(self.tmpdirname)
  # <pad> should convert to id 1 and back.
  def A__ ( self):
    lowercase = """<pad>"""
    lowercase = 1
    self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCamelCase) ,_UpperCamelCase)
    self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCamelCase) ,_UpperCamelCase)
  # Vocab ordering and size sanity checks.
  def A__ ( self):
    lowercase = list(self.get_tokenizer().get_vocab().keys())
    self.assertEqual(vocab_keys[0] ,'''<s>''')
    self.assertEqual(vocab_keys[1] ,'''<pad>''')
    self.assertEqual(vocab_keys[-1] ,'''<mask>''')
    self.assertEqual(len(_UpperCamelCase) ,1_0_0_2)
  def A__ ( self):
    self.assertEqual(self.get_tokenizer().vocab_size ,1_0_0_2)
  # Full tokenization round-trip on accented / numeric text.
  def A__ ( self):
    lowercase = XLMRobertaTokenizer(_UpperCamelCase ,keep_accents=_UpperCamelCase)
    lowercase = tokenizer.tokenize('''This is a test''')
    self.assertListEqual(_UpperCamelCase ,['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
    self.assertListEqual(
      tokenizer.convert_tokens_to_ids(_UpperCamelCase) ,[value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] ,)
    lowercase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
    self.assertListEqual(
      _UpperCamelCase ,[
        SPIECE_UNDERLINE + '''I''',
        SPIECE_UNDERLINE + '''was''',
        SPIECE_UNDERLINE + '''b''',
        '''or''',
        '''n''',
        SPIECE_UNDERLINE + '''in''',
        SPIECE_UNDERLINE + '''''',
        '''9''',
        '''2''',
        '''0''',
        '''0''',
        '''0''',
        ''',''',
        SPIECE_UNDERLINE + '''and''',
        SPIECE_UNDERLINE + '''this''',
        SPIECE_UNDERLINE + '''is''',
        SPIECE_UNDERLINE + '''f''',
        '''al''',
        '''s''',
        '''é''',
        '''.''',
      ] ,)
    lowercase = tokenizer.convert_tokens_to_ids(_UpperCamelCase)
    self.assertListEqual(
      _UpperCamelCase ,[
        value + tokenizer.fairseq_offset
        for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
        # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
      ] ,)
    lowercase = tokenizer.convert_ids_to_tokens(_UpperCamelCase)
    self.assertListEqual(
      _UpperCamelCase ,[
        SPIECE_UNDERLINE + '''I''',
        SPIECE_UNDERLINE + '''was''',
        SPIECE_UNDERLINE + '''b''',
        '''or''',
        '''n''',
        SPIECE_UNDERLINE + '''in''',
        SPIECE_UNDERLINE + '''''',
        '''<unk>''',
        '''2''',
        '''0''',
        '''0''',
        '''0''',
        ''',''',
        SPIECE_UNDERLINE + '''and''',
        SPIECE_UNDERLINE + '''this''',
        SPIECE_UNDERLINE + '''is''',
        SPIECE_UNDERLINE + '''f''',
        '''al''',
        '''s''',
        '''<unk>''',
        '''.''',
      ] ,)
  # Save/reload parity between the slow and fast tokenizers (legacy and new formats).
  def A__ ( self):
    if not self.test_slow_tokenizer:
      # as we don't have a slow version, we can't compare the outputs between slow and fast versions
      return
    lowercase = (self.rust_tokenizer_class, """hf-internal-testing/tiny-xlm-roberta""", {})
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
      with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
        lowercase = self.rust_tokenizer_class.from_pretrained(_UpperCamelCase ,**_UpperCamelCase)
        lowercase = self.tokenizer_class.from_pretrained(_UpperCamelCase ,**_UpperCamelCase)
        lowercase = tempfile.mkdtemp()
        lowercase = tokenizer_r.save_pretrained(_UpperCamelCase)
        lowercase = tokenizer_p.save_pretrained(_UpperCamelCase)
        # Checks it save with the same files + the tokenizer.json file for the fast one
        self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files))
        lowercase = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f)
        self.assertSequenceEqual(_UpperCamelCase ,_UpperCamelCase)
        # Checks everything loads correctly in the same way
        lowercase = tokenizer_r.from_pretrained(_UpperCamelCase)
        lowercase = tokenizer_p.from_pretrained(_UpperCamelCase)
        # Check special tokens are set accordingly on Rust and Python
        for key in tokenizer_pp.special_tokens_map:
          self.assertTrue(hasattr(_UpperCamelCase ,_UpperCamelCase))
          # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
          # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
        shutil.rmtree(_UpperCamelCase)
        # Save tokenizer rust, legacy_format=True
        lowercase = tempfile.mkdtemp()
        lowercase = tokenizer_r.save_pretrained(_UpperCamelCase ,legacy_format=_UpperCamelCase)
        lowercase = tokenizer_p.save_pretrained(_UpperCamelCase)
        # Checks it save with the same files
        self.assertSequenceEqual(_UpperCamelCase ,_UpperCamelCase)
        # Checks everything loads correctly in the same way
        lowercase = tokenizer_r.from_pretrained(_UpperCamelCase)
        lowercase = tokenizer_p.from_pretrained(_UpperCamelCase)
        # Check special tokens are set accordingly on Rust and Python
        for key in tokenizer_pp.special_tokens_map:
          self.assertTrue(hasattr(_UpperCamelCase ,_UpperCamelCase))
        shutil.rmtree(_UpperCamelCase)
        # Save tokenizer rust, legacy_format=False
        lowercase = tempfile.mkdtemp()
        lowercase = tokenizer_r.save_pretrained(_UpperCamelCase ,legacy_format=_UpperCamelCase)
        lowercase = tokenizer_p.save_pretrained(_UpperCamelCase)
        # Checks it saved the tokenizer.json file
        self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files))
        # Checks everything loads correctly in the same way
        lowercase = tokenizer_r.from_pretrained(_UpperCamelCase)
        lowercase = tokenizer_p.from_pretrained(_UpperCamelCase)
        # Check special tokens are set accordingly on Rust and Python
        for key in tokenizer_pp.special_tokens_map:
          self.assertTrue(hasattr(_UpperCamelCase ,_UpperCamelCase))
        shutil.rmtree(_UpperCamelCase)
  @cached_property
  def A__ ( self):
    """Full pretrained xlm-roberta-base tokenizer (network/cache dependent)."""
    return XLMRobertaTokenizer.from_pretrained('''xlm-roberta-base''')
  # Tokenizer objects must survive pickling.
  def A__ ( self):
    with tempfile.NamedTemporaryFile() as f:
      shutil.copyfile(_UpperCamelCase ,f.name)
      lowercase = XLMRobertaTokenizer(f.name ,keep_accents=_UpperCamelCase)
      lowercase = pickle.dumps(_UpperCamelCase)
      pickle.loads(_UpperCamelCase)
  # Slow and fast tokenizers must agree on tokens and ids.
  def A__ ( self):
    if not self.test_rust_tokenizer:
      return
    lowercase = self.get_tokenizer()
    lowercase = self.get_rust_tokenizer()
    lowercase = """I was born in 92000, and this is falsé."""
    lowercase = tokenizer.tokenize(_UpperCamelCase)
    lowercase = rust_tokenizer.tokenize(_UpperCamelCase)
    self.assertListEqual(_UpperCamelCase ,_UpperCamelCase)
    lowercase = tokenizer.encode(_UpperCamelCase ,add_special_tokens=_UpperCamelCase)
    lowercase = rust_tokenizer.encode(_UpperCamelCase ,add_special_tokens=_UpperCamelCase)
    self.assertListEqual(_UpperCamelCase ,_UpperCamelCase)
    lowercase = self.get_rust_tokenizer()
    lowercase = tokenizer.encode(_UpperCamelCase)
    lowercase = rust_tokenizer.encode(_UpperCamelCase)
    self.assertListEqual(_UpperCamelCase ,_UpperCamelCase)
  @slow
  def A__ ( self):
    lowercase = """Hello World!"""
    lowercase = [0, 3_5_3_7_8, 6_6_6_1, 3_8, 2]
    # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
    # xlmr.eval()
    # xlmr.encode(symbols)
    self.assertListEqual(_UpperCamelCase ,self.big_tokenizer.encode(_UpperCamelCase))
  @slow
  def A__ ( self):
    lowercase = (
      """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
      """ add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
    )
    lowercase = [
      0,
      3_2_9_3,
      8_3,
      1_0,
      4_5_5_2,
      4_9_8_9,
      7_9_8_6,
      6_7_8,
      1_0,
      5_9_1_5,
      1_1_1,
      1_7_9_4_5_9,
      1_2_4_8_5_0,
      4,
      6_0_4_4,
      2_3_7,
      1_2,
      6,
      5,
      6,
      4,
      6_7_8_0,
      7_0_5,
      1_5,
      1_3_8_8,
      4_4,
      3_7_8,
      1_0_1_1_4,
      7_1_1,
      1_5_2,
      2_0,
      6,
      5,
      2_2_3_7_6,
      6_4_2,
      1_2_2_1,
      1_5_1_9_0,
      3_4_1_5_3,
      4_5_0,
      5_6_0_8,
      9_5_9,
      1_1_1_9,
      5_7_7_0_2,
      1_3_6,
      1_8_6,
      4_7,
      1_0_9_8,
      2_9_3_6_7,
      4_7,
      # 4426, # What fairseq tokenizes from "<unk>": "_<"
      # 3678, # What fairseq tokenizes from "<unk>": "unk"
      # 2740, # What fairseq tokenizes from "<unk>": ">"
      3, # What we tokenize from "<unk>": "<unk>"
      6, # Residue from the tokenization: an extra sentencepiece underline
      4,
      6_0_4_4,
      2_3_7,
      6_2_8_4,
      5_0_9_0_1,
      5_2_8,
      3_1,
      9_0,
      3_4,
      9_2_7,
      2,
    ]
    # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
    # xlmr.eval()
    # xlmr.encode(symbols)
    self.assertListEqual(_UpperCamelCase ,self.big_tokenizer.encode(_UpperCamelCase))
  @slow
  def A__ ( self):
    # Golden encodings for the shared integration-test helper.
    # fmt: off
    lowercase = {"""input_ids""": [[0, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [0, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
    # fmt: on
    self.tokenizer_integration_test_util(
      expected_encoding=_UpperCamelCase ,model_name='''xlm-roberta-base''' ,revision='''d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3''' ,)
| 715 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
# Import torch/XLA helpers only when a TPU runtime is actually present.
if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met
class lowercase(SeqaSeqTrainer):
    """Seq2Seq trainer with QA-style post-processing of generated predictions.

    NOTE(review): the base was the undefined name ``SCREAMING_SNAKE_CASE__``;
    `SeqaSeqTrainer` is the trainer class this file imports. Every method
    signature previously declared several parameters all named ``A__``
    (a SyntaxError) and bound results to throwaway locals.
    """

    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix="eval", **gen_kwargs):
        """Run generation-based evaluation and compute QA metrics."""
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs['''max_length'''] = (
            gen_kwargs['''max_length'''] if gen_kwargs.get('''max_length''') is not None else self.args.generation_max_length
        )
        gen_kwargs['''num_beams'''] = (
            gen_kwargs['''num_beams'''] if gen_kwargs.get('''num_beams''') is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader, description='''Evaluation''', prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix, )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
            start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size), ))
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f'{metric_key_prefix}_'):
                    metrics[f'{metric_key_prefix}_{key}'] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix="test", **gen_kwargs):
        """Generate predictions on a test set and post-process them into QA metrics."""
        self._gen_kwargs = gen_kwargs.copy()
        predict_dataloader = self.get_test_dataloader(predict_dataset)
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader, description='''Prediction''', prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix, )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
            start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size), ))
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples, predict_dataset, output, '''predict''')
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f'{metric_key_prefix}_'):
                metrics[f'{metric_key_prefix}_{key}'] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
| 633 | 0 |
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
# CLI description for `accelerate config`.
# NOTE(review): upstream names this constant `description` and the parser
# below reads it by that name — confirm the intended binding.
lowercase__ :Any = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def get_user_input():
    """Prompt for the compute environment and gather the matching config.

    Renamed from the mangled ``UpperCamelCase`` to the name `config_command`
    below actually calls.
    """
    compute_environment = _ask_options(
        '''In which compute environment are you running?''', ['''This machine''', '''AWS (Amazon SageMaker)'''], _convert_compute_environment, )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser(subparsers=None):
    """Build the `accelerate config` argument parser.

    When `subparsers` is given, registers the ``config`` sub-command on it;
    otherwise builds a standalone parser. Renamed from the mangled
    ``UpperCamelCase`` to the name `main` below actually calls; the body's
    ``_lowerCamelCase`` placeholders were unresolved names.
    """
    # The CLI description (inlined so this function is self-contained).
    description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
    if subparsers is not None:
        parser = subparsers.add_parser('''config''', description=description)
    else:
        parser = argparse.ArgumentParser('''Accelerate config command''', description=description)
    parser.add_argument(
        '''--config_file''', default=None, help=(
            '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
            '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
            '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
            '''with \'huggingface\'.'''
        ), )
    if subparsers is not None:
        # NOTE(review): requires the `config_command` function defined below.
        parser.set_defaults(func=config_command)
    return parser
def config_command(args):
    """Interactively build a config and save it as JSON or YAML.

    Writes to ``args.config_file`` when given, else to the default cache
    location. Renamed from the mangled ``UpperCamelCase`` to the name `main`
    below actually calls; ``_lowerCamelCase`` placeholders were unresolved.
    """
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        # Ensure the cache directory exists before writing the default file.
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file
    if config_file.endswith('''.json'''):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f'accelerate configuration saved at {config_file}')
def main():
    """Entry point for `accelerate config`: parse args and run the command."""
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
| 716 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
lowercase__ :Any = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def UpperCamelCase ( lowerCAmelCase__=None ):
    """Build the argparse parser for `accelerate tpu-config`.

    When `lowerCAmelCase__` (a subparsers action) is given, the command is
    registered as a sub-command; otherwise a standalone parser is returned.
    """
    subparsers = lowerCAmelCase__
    if subparsers is not None:
        parser = subparsers.add_parser('''tpu-config''' , description=_description )
    else:
        parser = argparse.ArgumentParser('''Accelerate tpu-config command''' , description=_description )
    # Core arguments
    config_args = parser.add_argument_group(
        '''Config Arguments''' , '''Arguments that can be configured through `accelerate config`.''' )
    config_args.add_argument(
        '''--config_file''' , type=str , default=None , help='''Path to the config file to use for accelerate.''' , )
    config_args.add_argument(
        '''--tpu_name''' , default=None , help='''The name of the TPU to use. If not specified, will use the TPU specified in the config file.''' , )
    config_args.add_argument(
        '''--tpu_zone''' , default=None , help='''The zone of the TPU to use. If not specified, will use the zone specified in the config file.''' , )
    pod_args = parser.add_argument_group('''TPU Arguments''' , '''Arguments for options ran inside the TPU.''' )
    pod_args.add_argument(
        '''--use_alpha''' , action='''store_true''' , help='''Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.''' , )
    pod_args.add_argument(
        '''--command_file''' , default=None , help='''The path to the file containing the commands to run on the pod on startup.''' , )
    pod_args.add_argument(
        '''--command''' , action='''append''' , nargs='''+''' , help='''A command to run on the pod. Can be passed multiple times.''' , )
    pod_args.add_argument(
        '''--install_accelerate''' , action='''store_true''' , help='''Whether to install accelerate on the pod. Defaults to False.''' , )
    pod_args.add_argument(
        '''--accelerate_version''' , default='''latest''' , help='''The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.''' , )
    pod_args.add_argument(
        '''--debug''' , action='''store_true''' , help='''If set, will print the command that would be run instead of running it.''' )
    if subparsers is not None:
        # Registered as a sub-command: dispatch to the launcher when selected.
        # NOTE(review): the original passed an undefined name; the launcher
        # below is the upstream default — confirm.
        parser.set_defaults(func=tpu_command_launcher )
    return parser
def UpperCamelCase ( lowerCAmelCase__ ):
    """Assemble and run the `gcloud compute tpus tpu-vm ssh` setup command.

    `lowerCAmelCase__` is the parsed argparse namespace from the tpu-config
    parser; missing values are backfilled from the accelerate config file.
    Raises ValueError when neither a command nor a command file is given.
    """
    args = lowerCAmelCase__
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        defaults = load_config_from_file(args.config_file )
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    # Resolve the accelerate version spec to a pip-installable string.
    if args.accelerate_version == "dev":
        args.accelerate_version = '''git+https://github.com/huggingface/accelerate.git'''
    elif args.accelerate_version == "latest":
        args.accelerate_version = '''accelerate -U'''
    elif isinstance(parse(args.accelerate_version ) , Version ):
        args.accelerate_version = f'accelerate=={args.accelerate_version}'
    if not args.command_file and not args.command:
        raise ValueError('''You must specify either a command file or a command to run on the pod.''' )
    if args.command_file:
        with open(args.command_file , '''r''' ) as f:
            args.command = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0] , list ):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ['''cd /usr/share''']
    if args.install_accelerate:
        new_cmd += [f'pip install {args.accelerate_version}']
    new_cmd += args.command
    args.command = '''; '''.join(new_cmd )
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ['''gcloud''']
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f'Running {" ".join(cmd )}' )
        return
    subprocess.run(cmd )
    print('''Successfully setup pod.''' )
def UpperCamelCase ( ):
    """Standalone entry point for `accelerate tpu-config`."""
    parser = tpu_command_parser()
    args = parser.parse_args()
    # BUGFIX: the original passed an undefined name to the launcher.
    tpu_command_launcher(args )
| 633 | 0 |
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Keras constructor per EfficientNet variant.
# NOTE(review): the original bound every key to one mangled attribute; the
# B0..B7 constructors below follow the keras.applications API — confirm.
model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}

# Architecture hyper-parameters per variant, consumed by the converter below
# (which references this table as CONFIG_MAP).
CONFIG_MAP = {
    "b0": {
        "hidden_dim": 1280,
        "width_coef": 1.0,
        "depth_coef": 1.0,
        "image_size": 224,
        "dropout_rate": 0.2,
        "dw_padding": [],
    },
    "b1": {
        "hidden_dim": 1280,
        "width_coef": 1.0,
        "depth_coef": 1.1,
        "image_size": 240,
        "dropout_rate": 0.2,
        "dw_padding": [16],
    },
    "b2": {
        "hidden_dim": 1408,
        "width_coef": 1.1,
        "depth_coef": 1.2,
        "image_size": 260,
        "dropout_rate": 0.3,
        "dw_padding": [5, 8, 16],
    },
    "b3": {
        "hidden_dim": 1536,
        "width_coef": 1.2,
        "depth_coef": 1.4,
        "image_size": 300,
        "dropout_rate": 0.3,
        "dw_padding": [5, 18],
    },
    "b4": {
        "hidden_dim": 1792,
        "width_coef": 1.4,
        "depth_coef": 1.8,
        "image_size": 380,
        "dropout_rate": 0.4,
        "dw_padding": [6],
    },
    "b5": {
        "hidden_dim": 2048,
        "width_coef": 1.6,
        "depth_coef": 2.2,
        "image_size": 456,
        "dropout_rate": 0.4,
        "dw_padding": [13, 27],
    },
    "b6": {
        "hidden_dim": 2304,
        "width_coef": 1.8,
        "depth_coef": 2.6,
        "image_size": 528,
        "dropout_rate": 0.5,
        "dw_padding": [31],
    },
    "b7": {
        "hidden_dim": 2560,
        "width_coef": 2.0,
        "depth_coef": 3.1,
        "image_size": 600,
        "dropout_rate": 0.5,
        "dw_padding": [18],
    },
}
def UpperCamelCase ( lowerCAmelCase__ ):
    """Create an EfficientNetConfig for the given variant name ("b0".."b7")."""
    model_name = lowerCAmelCase__
    config = EfficientNetConfig()
    # NOTE(review): the original collapsed these into one mangled local; the
    # attribute names follow the HF EfficientNetConfig API — confirm.
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]
    # Attach ImageNet-1k label mappings fetched from the Hub.
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    # BUGFIX: the original cast an undefined name instead of the dict key.
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def UpperCamelCase ( ):
    """Download the standard COCO verification image (two cats) as a PIL image."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # BUGFIX: the original passed an undefined name as both URL and stream flag.
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
def UpperCamelCase ( lowerCAmelCase__ ):
    """Build an EfficientNetImageProcessor sized for the given model variant."""
    model_name = lowerCAmelCase__
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={'''height''': size, '''width''': size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47853944, 0.4732864, 0.47434163] ,
        # NOTE(review): the original passed an undefined name here; False
        # matches the upstream converter — confirm.
        do_center_crop=False , )
    return preprocessor
def UpperCamelCase ( lowerCAmelCase__ ):
    """Build the TF-variable-name -> HF-state-dict-key mapping for EfficientNet.

    `lowerCAmelCase__` is the list of TF variable names; only names actually
    present end up in the returned mapping (plus the classifier head keys).
    """
    original_param_names = lowerCAmelCase__
    # Extract per-block tags ("1a", "2b", ...) and map them to HF block indices.
    block_names = [v.split('''_''' )[0].split('''block''' )[1] for v in original_param_names if v.startswith('''block''' )]
    block_names = sorted(set(block_names ) )
    num_blocks = len(block_names )
    block_name_mapping = {b: str(i ) for b, i in zip(block_names , range(num_blocks ) )}
    rename_keys = []
    rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') )
    rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') )
    rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') )
    rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') )
    rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') )
    for b in block_names:
        hf_b = block_name_mapping[b]
        rename_keys.append((f'block{b}_expand_conv/kernel:0', f'encoder.blocks.{hf_b}.expansion.expand_conv.weight') )
        rename_keys.append((f'block{b}_expand_bn/gamma:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.weight') )
        rename_keys.append((f'block{b}_expand_bn/beta:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.bias') )
        rename_keys.append(
            (f'block{b}_expand_bn/moving_mean:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.running_mean') )
        rename_keys.append(
            (f'block{b}_expand_bn/moving_variance:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.running_var') )
        rename_keys.append(
            (f'block{b}_dwconv/depthwise_kernel:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight') )
        rename_keys.append((f'block{b}_bn/gamma:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight') )
        rename_keys.append((f'block{b}_bn/beta:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias') )
        rename_keys.append(
            (f'block{b}_bn/moving_mean:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean') )
        rename_keys.append(
            (f'block{b}_bn/moving_variance:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var') )
        rename_keys.append((f'block{b}_se_reduce/kernel:0', f'encoder.blocks.{hf_b}.squeeze_excite.reduce.weight') )
        rename_keys.append((f'block{b}_se_reduce/bias:0', f'encoder.blocks.{hf_b}.squeeze_excite.reduce.bias') )
        rename_keys.append((f'block{b}_se_expand/kernel:0', f'encoder.blocks.{hf_b}.squeeze_excite.expand.weight') )
        rename_keys.append((f'block{b}_se_expand/bias:0', f'encoder.blocks.{hf_b}.squeeze_excite.expand.bias') )
        rename_keys.append(
            (f'block{b}_project_conv/kernel:0', f'encoder.blocks.{hf_b}.projection.project_conv.weight') )
        rename_keys.append((f'block{b}_project_bn/gamma:0', f'encoder.blocks.{hf_b}.projection.project_bn.weight') )
        rename_keys.append((f'block{b}_project_bn/beta:0', f'encoder.blocks.{hf_b}.projection.project_bn.bias') )
        rename_keys.append(
            (f'block{b}_project_bn/moving_mean:0', f'encoder.blocks.{hf_b}.projection.project_bn.running_mean') )
        rename_keys.append(
            (f'block{b}_project_bn/moving_variance:0', f'encoder.blocks.{hf_b}.projection.project_bn.running_var') )
    rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') )
    rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') )
    rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') )
    rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') )
    rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') )
    # Keep only mappings whose TF variable exists, under the "efficientnet."
    # prefix used by EfficientNetForImageClassification's state dict.
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = '''efficientnet.''' + item[1]
    # The classification head lives outside the "efficientnet." backbone.
    key_mapping['''predictions/kernel:0'''] = '''classifier.weight'''
    key_mapping['''predictions/bias:0'''] = '''classifier.bias'''
    return key_mapping
def UpperCamelCase ( hf_params , tf_params , key_mapping ):
    """Copy TF weights into the HF state dict in-place, transposing as needed.

    hf_params: HF model state dict (torch tensors, modified in place).
    tf_params: mapping of TF variable name -> numpy array.
    key_mapping: TF name -> HF state-dict key (from the rename-keys helper).

    The original signature repeated one parameter name (a SyntaxError);
    distinct names are restored from the call site's argument order.
    """
    for key, value in tf_params.items():
        # Normalization statistics are handled through their own renamed keys.
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            # Conv kernels: channels-last TF layout -> torch channels-first.
            new_hf_value = torch.from_numpy(value ).permute(3 , 2 , 0 , 1 )
        elif "depthwise_kernel" in key:
            # Depthwise kernels use a different axis order.
            new_hf_value = torch.from_numpy(value ).permute(2 , 3 , 0 , 1 )
        elif "kernel" in key:
            # Dense kernels: plain transpose.
            new_hf_value = torch.from_numpy(np.transpose(value ) )
        else:
            new_hf_value = torch.from_numpy(value )
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value )
@torch.no_grad()
def UpperCamelCase ( model_name , pytorch_dump_folder_path , save_model , push_to_hub ):
    """Convert a Keras EfficientNet checkpoint to HF format and verify outputs.

    model_name: variant key in ("b0".."b7").
    pytorch_dump_folder_path: output directory used when save_model is True.
    save_model / push_to_hub: booleans controlling persistence and upload.

    The original signature repeated one parameter name (a SyntaxError);
    distinct names are restored from the __main__ call site.
    """
    # Instantiate the original Keras model with ImageNet weights.
    original_model = model_classes[model_name](
        include_top=True , weights='''imagenet''' , input_tensor=None , input_shape=None , pooling=None , classes=1000 , classifier_activation='''softmax''' , )
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys() )
    # Load HuggingFace model
    config = get_efficientnet_config(model_name )
    hf_model = EfficientNetForImageClassification(config ).eval()
    hf_params = hf_model.state_dict()
    # Create src-to-dst parameter name mapping dictionary
    print('''Converting parameters...''' )
    key_mapping = rename_keys(tf_param_names )
    replace_params(hf_params , tf_params , key_mapping )
    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name )
    inputs = preprocessor(images=prepare_img() , return_tensors='''pt''' )
    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs )
    hf_logits = outputs.logits.detach().numpy()
    # Original model inference
    # NOTE(review): the original mangled this assignment; freezing the Keras
    # model matches the upstream converter — confirm.
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
    x = image.img_to_array(img )
    x = np.expand_dims(x , axis=0 )
    original_logits = original_model.predict(x )
    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits , hf_logits , atol=1E-3 ), "The predicted logits are not the same."
    print('''Model outputs match!''' )
    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path ):
            os.mkdir(pytorch_dump_folder_path )
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path )
        preprocessor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        # Push model and image processor to hub
        print(f'Pushing converted {model_name} to the hub...' )
        hub_name = f'efficientnet-{model_name}'
        preprocessor.push_to_hub(hub_name )
        hf_model.push_to_hub(hub_name )
if __name__ == "__main__":
    # BUGFIX: the parser/args were bound to mangled names and then referenced
    # as `parser` / `args`, which raised NameError.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="b0",
        type=str,
        help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="hf_model",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--save_model", action="store_true", help="Save model to local")
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    args = parser.parse_args()
    convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 717 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
# Pattern splitting source text into identifier-ish tokens; referenced below
# as NON_ALPHA, so it must be bound under that name.
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def UpperCamelCase ( lowerCAmelCase__ ):
    """Return the MinHash of a token list, or None when it is too short to matter."""
    if len(lowerCAmelCase__ ) < MIN_NUM_TOKENS:
        return None
    # BUGFIX: the original passed the token list itself as `num_perm`;
    # the permutation count must be the NUM_PERM constant.
    min_hash = MinHash(num_perm=NUM_PERM )
    for token in set(lowerCAmelCase__ ):
        min_hash.update(token.encode() )
    return min_hash
def UpperCamelCase ( lowerCAmelCase__ ):
    """Split a string on non-identifier characters; return the set of non-blank tokens."""
    tokens = set()
    for piece in NON_ALPHA.split(lowerCAmelCase__ ):
        if len(piece.strip() ) > 0:
            tokens.add(piece )
    return tokens
class lowercase :
    """MinHash-LSH index that incrementally groups near-duplicate documents.

    Keys are (index, repo_name, path) tuples; each cluster is keyed by the
    first member seen for that near-duplicate group.
    """

    def __init__( self ,*, duplication_jaccard_threshold = 0.85 ,):
        # Keyword renamed from a mangled placeholder to match the caller,
        # which passes duplication_jaccard_threshold=... by keyword.
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold ,num_perm=self._num_perm)
        # cluster base key -> set of duplicate keys
        self._duplicate_clusters = defaultdict(set)

    def add( self ,code_key ,min_hash):
        """Insert a document; attach it to an existing cluster when LSH finds a near-duplicate."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f'Duplicate key {code_key}')
            return
        self._index.insert(code_key ,min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                # No existing cluster: start one under the first close duplicate.
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters( self):
        """Return clusters as lists of {"base_index", "repo_name", "path"} dicts."""
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{'''base_index''': el[0], '''repo_name''': el[1], '''path''': el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save( self ,filepath):
        """Dump all duplicate clusters to a JSON file."""
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath ,'''w''') as f:
            json.dump(duplicate_clusters ,f)
def UpperCamelCase ( lowerCAmelCase__ ):
    """Worker: map an (index, row) pair to ((index, repo_name, path), MinHash).

    Implicitly returns None when the document is too short to hash.
    """
    # BUGFIX: the original unpacked the tuple into one repeated mangled name.
    index, data = lowerCAmelCase__
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data['''content'''] ) if len(t.strip() ) > 0] )
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def UpperCamelCase ( lowerCAmelCase__ ):
    """Yield (key, MinHash) pairs computed by a process pool over the dataset iterator."""
    with mp.Pool() as pool:
        hashed = pool.imap_unordered(
            _compute_min_hash , ThreadedIterator(lowerCAmelCase__ , max_queue_size=1_0000 ) , chunksize=100 , )
        for item in hashed:
            # Rows below the minimum token count hash to None and are dropped.
            if item is not None:
                yield item
def UpperCamelCase ( dataset_iterator , jaccard_threshold ):
    """Index every file's MinHash and return the resulting duplicate clusters.

    The original signature repeated one parameter name (a SyntaxError);
    distinct names are restored here.
    """
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold )
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator ) ) , max_queue_size=100 ) ):
        di.add(filename , min_hash )
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def UpperCamelCase ( code1 , code2 ):
    """Jaccard similarity between the token sets of two code strings.

    BUGFIX: the original tokenized one repeated parameter and intersected a
    set with itself, which always yielded 1.0.
    """
    tokens1 = get_tokens(code1 )
    tokens2 = get_tokens(code2 )
    return len(tokens1 & tokens2 ) / len(tokens1 | tokens2 )
# Module-level handle letting pool workers read the dataset without pickling
# it per task; populated by the extremes finder below via `global`.
_shared_dataset = None
def UpperCamelCase ( cluster , jaccard_threshold ):
    """Reduce one duplicate cluster to its "extremes" (mutually dissimilar members).

    Each kept element gets a "copies" count of how many cluster members it
    represents. Documents are read from the module-global _shared_dataset.
    The original signature repeated one parameter name (a SyntaxError).
    """
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1['''base_index''']]['''content''']
        for element2 in extremes:
            code2 = _shared_dataset[element2['''base_index''']]['''content''']
            if jaccard_similarity(code1 , code2 ) >= jaccard_threshold:
                # Near-duplicate of an already-kept extreme: count it there.
                element2["copies"] += 1
                break
        else:
            # Dissimilar to every kept extreme: keep it as a new one.
            element1["copies"] = 1
            extremes.append(element1 )
    return extremes
def UpperCamelCase ( cluster_list , dataset , jaccard_threshold ):
    """Compute the extremes of every duplicate cluster in parallel.

    The dataset is published through the module-global _shared_dataset so
    worker processes inherit it instead of pickling it for every task.
    The original signature repeated one parameter name (a SyntaxError).
    """
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared , jaccard_threshold=jaccard_threshold )
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f , cluster_list , ) , total=len(cluster_list ) , ):
            extremes_list.append(extremes )
    return extremes_list
def UpperCamelCase ( dataset , jaccard_threshold = 0.85 ):
    """Remove near-duplicate files from a dataset, keeping one extreme per cluster.

    Returns (filtered_dataset, duplicate_clusters); cluster elements are
    annotated in place with "is_extreme" and, for kept files, "copies".
    The original signature repeated one parameter name (a SyntaxError).
    """
    duplicate_clusters = make_duplicate_clusters(dataset , jaccard_threshold )
    duplicate_indices = {x['''base_index'''] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters , dataset , jaccard_threshold )
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element['''base_index''']] = element
    # Drop every duplicate that is not a kept extreme.
    remove_indices = duplicate_indices - set(extreme_dict.keys() )
    ds_filter = dataset.filter(lambda x , idx : idx not in remove_indices , with_indices=True )
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element['''base_index'''] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element['''base_index''']]['''copies''']
    print(f'Original dataset size: {len(dataset )}' )
    print(f'Number of duplicate clusters: {len(duplicate_clusters )}' )
    print(f'Files in duplicate cluster: {len(duplicate_indices )}' )
    print(f'Unique files in duplicate cluster: {len(extreme_dict )}' )
    print(f'Filtered dataset size: {len(ds_filter )}' )
    return ds_filter, duplicate_clusters
| 633 | 0 |
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCamelCase ( tf_checkpoint_path , config_file , pytorch_dump_path , base_model ):
    """Convert a TF Funnel checkpoint into a PyTorch state dict on disk.

    The original signature repeated one parameter name (a SyntaxError) and
    referenced an undefined alias; names are restored from the __main__
    call site's argument order.
    """
    config = FunnelConfig.from_json_file(config_file )
    print(f'Building PyTorch model from configuration: {config}' )
    # A "base" model carries no decoder on top.
    model = FunnelBaseModel(config ) if base_model else FunnelModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    # BUGFIX: the parser/args were bound to mangled names and then referenced
    # as `parser` / `args`, which raised NameError.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
    )
| 718 |
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
# Bound as `logger` because the safety-checker class below calls
# logger.warning(...); the original mangled name left it undefined.
logger = logging.get_logger(__name__)
class lowercase ( PreTrainedModel ):
    """Safety checker scoring images for NSFW / watermark content via CLIP.

    Images whose score exceeds the corresponding threshold are replaced
    in-place by black (all-zero) arrays.
    """

    # NOTE(review): the original bound both class attributes to one mangled
    # name and used an undefined base class; config_class /
    # _no_split_modules follow the PreTrainedModel API — confirm.
    config_class = CLIPConfig
    _no_split_modules = ['''CLIPEncoderLayer''']

    def __init__( self ,config):
        super().__init__(config)
        # Vision tower plus two linear probes on the CLIP projection:
        # p_head scores NSFW content, w_head scores watermarks.
        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)
        self.p_head = nn.Linear(config.vision_config.projection_dim ,1)
        self.w_head = nn.Linear(config.vision_config.projection_dim ,1)

    # NOTE(review): upstream names this method `forward` so nn.Module can
    # dispatch to it; the mangled name is kept for interface safety — confirm.
    @torch.no_grad()
    def A__( self ,clip_input ,images ,p_threshold=0.5 ,w_threshold=0.5):
        """Score `images` from `clip_input` embeddings; blank flagged images."""
        image_embeds = self.vision_model(clip_input)[0]
        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()
        if any(nsfw_detected):
            logger.warning(
                '''Potential NSFW content was detected in one or more images. A black image will be returned instead.'''
                ''' Try again with a different prompt and/or seed.''')
        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)
        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()
        if any(watermark_detected):
            logger.warning(
                '''Potential watermarked content was detected in one or more images. A black image will be returned instead.'''
                ''' Try again with a different prompt and/or seed.''')
        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)
        return images, nsfw_detected, watermark_detected
| 633 | 0 |
'''simple docstring'''
from functools import lru_cache
@lru_cache
def UpperCamelCase ( lowerCAmelCase__ ):
    """Return the factorial of `lowerCAmelCase__`.

    Raises ValueError for negative input. BUGFIX: the original body
    referenced the undefined names `num` and `factorial`.
    """
    if lowerCAmelCase__ < 0:
        raise ValueError('''Number should not be negative.''' )
    return 1 if lowerCAmelCase__ in (0, 1) else lowerCAmelCase__ * UpperCamelCase(lowerCAmelCase__ - 1 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 719 |
class lowercase :
    """Node of a binary search tree.

    BUGFIX: the original assigned the constructor/insert results to a bare
    local instead of instance attributes, and the insert method's name did
    not match its own recursive `self.left.insert(...)` calls.
    """

    def __init__( self ,A__):
        self.val = A__      # node payload
        self.left = None    # left child subtree (values < val)
        self.right = None   # right child subtree (values > val)

    def insert( self ,A__):
        """Insert A__ into the subtree rooted here; equal values overwrite."""
        # NOTE(review): a falsy root value (e.g. 0) is overwritten rather
        # than compared — preserved from the original logic.
        if self.val:
            if A__ < self.val:
                if self.left is None:
                    # type(self) resolves the class without relying on a
                    # module-level name.
                    self.left = type(self)(A__)
                else:
                    self.left.insert(A__)
            elif A__ > self.val:
                if self.right is None:
                    self.right = type(self)(A__)
                else:
                    self.right.insert(A__)
        else:
            self.val = A__
def UpperCamelCase ( root , res ):
    """Append the subtree's values to `res` in ascending (inorder) order.

    The original signature repeated one parameter name (a SyntaxError) and
    recursed through an undefined name; the recursion is now a self-call.
    """
    # Recursive traversal
    if root:
        UpperCamelCase(root.left , res )
        res.append(root.val )
        UpperCamelCase(root.right , res )
def UpperCamelCase ( lowerCAmelCase__ ):
    """Sort a list by inserting into a BST and reading it back in inorder.

    BUGFIX: the original referenced the undefined local `arr`, and the
    __main__ guard called the undefined name `tree_sort`.
    """
    # Build BST
    if len(lowerCAmelCase__ ) == 0:
        return lowerCAmelCase__
    root = Node(lowerCAmelCase__[0] )
    for i in range(1 , len(lowerCAmelCase__ ) ):
        root.insert(lowerCAmelCase__[i] )
    # Traverse BST in order.
    res = []
    inorder(root , res )
    return res


if __name__ == "__main__":
    print(UpperCamelCase([10, 1, 3, 2, 9, 14, 13]))
| 633 | 0 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowercase__ :Optional[int] = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class lowercase ( _a , unittest.TestCase ):
lowercase_ : Tuple =XLMRobertaTokenizer
lowercase_ : Dict =XLMRobertaTokenizerFast
lowercase_ : Tuple =True
lowercase_ : Optional[int] =True
def A__ ( self):
super().setUp()
# We have a SentencePiece fixture for testing
lowercase = XLMRobertaTokenizer(snake_case_ ,keep_accents=snake_case_)
tokenizer.save_pretrained(self.tmpdirname)
def A__ ( self):
lowercase = '''<pad>'''
lowercase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_) ,snake_case_)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_) ,snake_case_)
def A__ ( self):
lowercase = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] ,'''<s>''')
self.assertEqual(vocab_keys[1] ,'''<pad>''')
self.assertEqual(vocab_keys[-1] ,'''<mask>''')
self.assertEqual(len(snake_case_) ,1_0_0_2)
def A__ ( self):
self.assertEqual(self.get_tokenizer().vocab_size ,1_0_0_2)
def A__ ( self):
lowercase = XLMRobertaTokenizer(snake_case_ ,keep_accents=snake_case_)
lowercase = tokenizer.tokenize('''This is a test''')
self.assertListEqual(snake_case_ ,['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(snake_case_) ,[value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] ,)
lowercase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
self.assertListEqual(
snake_case_ ,[
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] ,)
lowercase = tokenizer.convert_tokens_to_ids(snake_case_)
self.assertListEqual(
snake_case_ ,[
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] ,)
lowercase = tokenizer.convert_ids_to_tokens(snake_case_)
self.assertListEqual(
snake_case_ ,[
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] ,)
def A__ ( self):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowercase = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-xlm-roberta''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
lowercase = self.rust_tokenizer_class.from_pretrained(snake_case_ ,**snake_case_)
lowercase = self.tokenizer_class.from_pretrained(snake_case_ ,**snake_case_)
lowercase = tempfile.mkdtemp()
lowercase = tokenizer_r.save_pretrained(snake_case_)
lowercase = tokenizer_p.save_pretrained(snake_case_)
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files))
lowercase = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f)
self.assertSequenceEqual(snake_case_ ,snake_case_)
# Checks everything loads correctly in the same way
lowercase = tokenizer_r.from_pretrained(snake_case_)
lowercase = tokenizer_p.from_pretrained(snake_case_)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case_ ,snake_case_))
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(snake_case_)
# Save tokenizer rust, legacy_format=True
lowercase = tempfile.mkdtemp()
lowercase = tokenizer_r.save_pretrained(snake_case_ ,legacy_format=snake_case_)
lowercase = tokenizer_p.save_pretrained(snake_case_)
# Checks it save with the same files
self.assertSequenceEqual(snake_case_ ,snake_case_)
# Checks everything loads correctly in the same way
lowercase = tokenizer_r.from_pretrained(snake_case_)
lowercase = tokenizer_p.from_pretrained(snake_case_)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case_ ,snake_case_))
shutil.rmtree(snake_case_)
# Save tokenizer rust, legacy_format=False
lowercase = tempfile.mkdtemp()
lowercase = tokenizer_r.save_pretrained(snake_case_ ,legacy_format=snake_case_)
lowercase = tokenizer_p.save_pretrained(snake_case_)
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files))
# Checks everything loads correctly in the same way
lowercase = tokenizer_r.from_pretrained(snake_case_)
lowercase = tokenizer_p.from_pretrained(snake_case_)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case_ ,snake_case_))
shutil.rmtree(snake_case_)
@cached_property
def A__ ( self):
return XLMRobertaTokenizer.from_pretrained('''xlm-roberta-base''')
def A__ ( self):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(snake_case_ ,f.name)
lowercase = XLMRobertaTokenizer(f.name ,keep_accents=snake_case_)
lowercase = pickle.dumps(snake_case_)
pickle.loads(snake_case_)
def A__ ( self):
if not self.test_rust_tokenizer:
return
lowercase = self.get_tokenizer()
lowercase = self.get_rust_tokenizer()
lowercase = '''I was born in 92000, and this is falsé.'''
lowercase = tokenizer.tokenize(snake_case_)
lowercase = rust_tokenizer.tokenize(snake_case_)
self.assertListEqual(snake_case_ ,snake_case_)
lowercase = tokenizer.encode(snake_case_ ,add_special_tokens=snake_case_)
lowercase = rust_tokenizer.encode(snake_case_ ,add_special_tokens=snake_case_)
self.assertListEqual(snake_case_ ,snake_case_)
lowercase = self.get_rust_tokenizer()
lowercase = tokenizer.encode(snake_case_)
lowercase = rust_tokenizer.encode(snake_case_)
self.assertListEqual(snake_case_ ,snake_case_)
@slow
def A__ ( self):
lowercase = '''Hello World!'''
lowercase = [0, 3_5_3_7_8, 6_6_6_1, 3_8, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(snake_case_ ,self.big_tokenizer.encode(snake_case_))
@slow
def A__ ( self):
lowercase = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
lowercase = [
0,
3_2_9_3,
8_3,
1_0,
4_5_5_2,
4_9_8_9,
7_9_8_6,
6_7_8,
1_0,
5_9_1_5,
1_1_1,
1_7_9_4_5_9,
1_2_4_8_5_0,
4,
6_0_4_4,
2_3_7,
1_2,
6,
5,
6,
4,
6_7_8_0,
7_0_5,
1_5,
1_3_8_8,
4_4,
3_7_8,
1_0_1_1_4,
7_1_1,
1_5_2,
2_0,
6,
5,
2_2_3_7_6,
6_4_2,
1_2_2_1,
1_5_1_9_0,
3_4_1_5_3,
4_5_0,
5_6_0_8,
9_5_9,
1_1_1_9,
5_7_7_0_2,
1_3_6,
1_8_6,
4_7,
1_0_9_8,
2_9_3_6_7,
4_7,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6_0_4_4,
2_3_7,
6_2_8_4,
5_0_9_0_1,
5_2_8,
3_1,
9_0,
3_4,
9_2_7,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(snake_case_ ,self.big_tokenizer.encode(snake_case_))
@slow
def A__ ( self):
# fmt: off
lowercase = {'''input_ids''': [[0, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [0, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ ,model_name='''xlm-roberta-base''' ,revision='''d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3''' ,)
| 720 |
import os
def UpperCamelCase ( lowerCAmelCase__ = "input.txt" ):
    """Project Euler 82: minimal path sum moving right, up or down.

    Reads a comma-separated integer matrix from ``lowerCAmelCase__`` and returns
    the minimal sum of a path that starts in any cell of the leftmost column and
    ends in any cell of the rightmost column.

    NOTE(review): the path is resolved via ``os.path.dirname(filename)`` — the
    upstream version resolved relative to ``__file__``; confirm which is wanted.
    """
    with open(os.path.join(os.path.dirname(lowerCAmelCase__ ) , lowerCAmelCase__ ) ) as input_file:
        matrix = [
            [int(element ) for element in line.split(''',''' )]
            for line in input_file.readlines()
        ]
    rows = len(matrix )
    cols = len(matrix[0] )
    # minimal_path_sums[i][j] = cheapest path from the left column to cell (i, j)
    minimal_path_sums = [[-1 for _ in range(cols )] for _ in range(rows )]
    for i in range(rows ):
        minimal_path_sums[i][0] = matrix[i][0]
    for j in range(1 , cols ):
        # First pass: enter column j from the left only.
        for i in range(rows ):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        # Second pass (top-down): allow moving down within the column.
        for i in range(1 , rows ):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
        # Third pass (bottom-up): allow moving up within the column.
        for i in range(rows - 2 , -1 , -1 ):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
    # Fixed: `solution()` was an unresolved name; call the function defined above.
    print(F'{UpperCamelCase() = }')
| 633 | 0 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
# Fixed: three module constants were all assigned to the clobbering name
# `lowercase__` while the benchmark code below reads these names.
SPEED_TEST_N_EXAMPLES = 50_0000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
# JSON file the timing results are written to (mirrors this script's name).
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def UpperCamelCase ( dataset , **kwargs ):
    """Time `Dataset.map(**kwargs)`; @get_duration turns this into a duration.

    Fixed: both parameters were mangled to the same name (a SyntaxError).
    The mapped result is discarded — only the measured duration matters.
    """
    _ = dataset.map(**kwargs )
@get_duration
def UpperCamelCase ( dataset , **kwargs ):
    """Time `Dataset.filter(**kwargs)`; @get_duration turns this into a duration.

    Fixed: both parameters were mangled to the same name (a SyntaxError).
    The filtered result is discarded — only the measured duration matters.
    """
    _ = dataset.filter(**kwargs )
def UpperCamelCase ( ):
    """Benchmark map/filter over a generated dataset and dump timings to JSON.

    NOTE(review): the obfuscation collapsed every result name to `lowercase` and
    every argument to the unresolved `__UpperCamelCase`, so `dataset`,
    `tokenizer`, `examples` and the timings dict no longer resolve — restore the
    real names from the upstream benchmark before running this.
    """
    lowercase = {"""num examples""": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        # Synthetic dataset with one string column and one float32 column.
        lowercase = datasets.Features({'''text''': datasets.Value('''string''' ), '''numbers''': datasets.Value('''float32''' )} )
        lowercase = generate_example_dataset(
            os.path.join(__UpperCamelCase , '''dataset.arrow''' ) , __UpperCamelCase , num_examples=__UpperCamelCase )
        # Slow tokenizer on purpose — the fast path is benchmarked separately.
        lowercase = transformers.AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=__UpperCamelCase )
        def tokenize(lowerCAmelCase__ ):
            return tokenizer(examples['''text'''] )
        # map() under various batching settings…
        lowercase = map(__UpperCamelCase )
        lowercase = map(__UpperCamelCase , batched=__UpperCamelCase )
        lowercase = map(__UpperCamelCase , function=lambda lowerCAmelCase__ : None , batched=__UpperCamelCase )
        # …and under each output format.
        with dataset.formatted_as(type='''numpy''' ):
            lowercase = map(__UpperCamelCase , function=lambda lowerCAmelCase__ : None , batched=__UpperCamelCase )
        with dataset.formatted_as(type='''pandas''' ):
            lowercase = map(__UpperCamelCase , function=lambda lowerCAmelCase__ : None , batched=__UpperCamelCase )
        with dataset.formatted_as(type='''torch''' , columns='''numbers''' ):
            lowercase = map(__UpperCamelCase , function=lambda lowerCAmelCase__ : None , batched=__UpperCamelCase )
        with dataset.formatted_as(type='''tensorflow''' , columns='''numbers''' ):
            lowercase = map(__UpperCamelCase , function=lambda lowerCAmelCase__ : None , batched=__UpperCamelCase )
        lowercase = map(__UpperCamelCase , function=__UpperCamelCase , batched=__UpperCamelCase )
        lowercase = filter(__UpperCamelCase )
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
        with open(__UpperCamelCase , '''wb''' ) as f:
            f.write(json.dumps(__UpperCamelCase ).encode('''utf-8''' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 721 |
from __future__ import annotations
def UpperCamelCase ( lowerCAmelCase__ ):
    """Return True iff the given side lengths can form a polygon.

    A polygon exists when the longest side is strictly shorter than the sum of
    the remaining sides. Raises ValueError for fewer than 2 sides or any
    non-positive side length.
    """
    if len(lowerCAmelCase__ ) < 2:
        raise ValueError('''Monogons and Digons are not polygons in the Euclidean space''' )
    # Fixed: the original referenced the undefined name `nums` here.
    if any(i <= 0 for i in lowerCAmelCase__ ):
        raise ValueError('''All values must be greater than 0''' )
    copy_nums = lowerCAmelCase__.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 633 | 0 |
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class lowercase ( _lowercase ):
    """A property whose value is computed once, then cached on the instance.

    Fixed: the two `__get__` parameters were both mangled to `A__` (a
    SyntaxError) and the body referenced the undefined `A_`.
    """

    def __get__( self ,obj ,objtype=None):
        # See docs.python.org/3/howto/descriptor.html#properties
        if obj is None:
            # Accessed on the class: return the descriptor itself.
            return self
        if self.fget is None:
            raise AttributeError('''unreadable attribute''')
        attr = '''__cached_''' + self.fget.__name__
        cached = getattr(obj ,attr ,None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj ,attr ,cached)
        return cached
def UpperCamelCase ( lowerCAmelCase__ ):
    """Convert a truthy/falsy string to 1/0 (mirrors distutils.util.strtobool).

    Fixed: the original lowered the undefined name `val` instead of the
    parameter, then tested the still-undefined `val`.
    """
    val = lowerCAmelCase__.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f'invalid truth value {val!r}' )
def UpperCamelCase ( lowerCAmelCase__ ):
    """Return True if the argument is a torch/TF/jax tensor (or torch.fx proxy),
    falling back to a numpy-array check.

    Fixed: the body tested the undefined name `snake_case_` instead of the
    parameter. Framework modules are imported lazily, only when available.
    """
    if is_torch_fx_proxy(lowerCAmelCase__ ):
        return True
    if is_torch_available():
        import torch

        if isinstance(lowerCAmelCase__ , torch.Tensor ):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(lowerCAmelCase__ , tf.Tensor ):
            return True
    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(lowerCAmelCase__ , (jnp.ndarray, Tracer) ):
            return True
    return isinstance(lowerCAmelCase__ , np.ndarray )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return isinstance(snake_case__ , np.ndarray )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return _is_numpy(snake_case__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
import torch
return isinstance(snake_case__ , torch.Tensor )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return False if not is_torch_available() else _is_torch(snake_case__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
import torch
return isinstance(snake_case__ , torch.device )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return False if not is_torch_available() else _is_torch_device(snake_case__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
import torch
if isinstance(snake_case__ , snake_case__ ):
if hasattr(snake_case__ , snake_case__ ):
lowercase = getattr(snake_case__ , snake_case__ )
else:
return False
return isinstance(snake_case__ , torch.dtype )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return False if not is_torch_available() else _is_torch_dtype(snake_case__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
import tensorflow as tf
return isinstance(snake_case__ , tf.Tensor )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return False if not is_tf_available() else _is_tensorflow(snake_case__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(snake_case__ , '''is_symbolic_tensor''' ):
return tf.is_symbolic_tensor(snake_case__ )
return type(snake_case__ ) == tf.Tensor
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return False if not is_tf_available() else _is_tf_symbolic_tensor(snake_case__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
import jax.numpy as jnp # noqa: F811
return isinstance(snake_case__ , jnp.ndarray )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return False if not is_flax_available() else _is_jax(snake_case__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
if isinstance(snake_case__ , (dict, UserDict) ):
return {k: to_py_obj(snake_case__ ) for k, v in obj.items()}
elif isinstance(snake_case__ , (list, tuple) ):
return [to_py_obj(snake_case__ ) for o in obj]
elif is_tf_tensor(snake_case__ ):
return obj.numpy().tolist()
elif is_torch_tensor(snake_case__ ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(snake_case__ ):
return np.asarray(snake_case__ ).tolist()
elif isinstance(snake_case__ , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
if isinstance(snake_case__ , (dict, UserDict) ):
return {k: to_numpy(snake_case__ ) for k, v in obj.items()}
elif isinstance(snake_case__ , (list, tuple) ):
return np.array(snake_case__ )
elif is_tf_tensor(snake_case__ ):
return obj.numpy()
elif is_torch_tensor(snake_case__ ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(snake_case__ ):
return np.asarray(snake_case__ )
else:
return obj
class lowercase ( _lowercase ):
def A__ ( self):
lowercase = fields(self)
# Safety and consistency checks
if not len(A_):
raise ValueError(f'{self.__class__.__name__} has no fields.')
if not all(field.default is None for field in class_fields[1:]):
raise ValueError(f'{self.__class__.__name__} should not have more than one required field.')
lowercase = getattr(self ,class_fields[0].name)
lowercase = all(getattr(self ,field.name) is None for field in class_fields[1:])
if other_fields_are_none and not is_tensor(A_):
if isinstance(A_ ,A_):
lowercase = first_field.items()
lowercase = True
else:
try:
lowercase = iter(A_)
lowercase = True
except TypeError:
lowercase = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(A_):
if (
not isinstance(A_ ,(list, tuple))
or not len(A_) == 2
or not isinstance(element[0] ,A_)
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
lowercase = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
f'Cannot set key/value for {element}. It needs to be a tuple (key, value).')
break
setattr(self ,element[0] ,element[1])
if element[1] is not None:
lowercase = element[1]
elif first_field is not None:
lowercase = first_field
else:
for field in class_fields:
lowercase = getattr(self ,field.name)
if v is not None:
lowercase = v
def __delitem__( self ,*A__ ,**A__):
raise Exception(f'You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.')
def A__ ( self ,*A__ ,**A__):
raise Exception(f'You cannot use ``setdefault`` on a {self.__class__.__name__} instance.')
def A__ ( self ,*A__ ,**A__):
raise Exception(f'You cannot use ``pop`` on a {self.__class__.__name__} instance.')
def A__ ( self ,*A__ ,**A__):
raise Exception(f'You cannot use ``update`` on a {self.__class__.__name__} instance.')
def __getitem__( self ,A__):
if isinstance(A_ ,A_):
lowercase = dict(self.items())
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self ,A__ ,A__):
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(A_ ,A_)
super().__setattr__(A_ ,A_)
def __setitem__( self ,A__ ,A__):
# Will raise a KeyException if needed
super().__setitem__(A_ ,A_)
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(A_ ,A_)
def A__ ( self):
return tuple(self[k] for k in self.keys())
class lowercase ( str , Enum ):
    """String-valued Enum with a friendlier error for invalid values.

    Fixed: the base list was mangled to a duplicated `_lowercase` (a TypeError
    at class creation; upstream this is `(str, Enum)`), and the body referenced
    `A_` and the misspelled `_valueamember_map_`.
    """

    @classmethod
    def A__ ( cls ,value):
        # Upstream this is Enum's `_missing_` hook: list the valid values
        # instead of Enum's terse default error message.
        raise ValueError(
            f'{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}')
class lowercase ( _lowercase ):
lowercase_ : str ='''longest'''
lowercase_ : Optional[int] ='''max_length'''
lowercase_ : Any ='''do_not_pad'''
class lowercase ( _lowercase ):
lowercase_ : List[Any] ='''pt'''
lowercase_ : Union[str, Any] ='''tf'''
lowercase_ : Tuple ='''np'''
lowercase_ : Dict ='''jax'''
class lowercase :
    """Enter a list of context managers together, exiting them in reverse order.

    Thin wrapper over `contextlib.ExitStack`. Fixed: `__init__` never stored
    its attributes on `self`, and `__exit__`'s star parameters were both
    mangled to `A__` (a SyntaxError).
    """

    def __init__( self ,context_managers):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__( self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__( self ,*exc_info ,**kwargs):
        self.stack.__exit__(*exc_info ,**kwargs)
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase = infer_framework(snake_case__ )
if framework == "tf":
lowercase = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
lowercase = inspect.signature(model_class.forward ) # PyTorch models
else:
lowercase = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase = model_class.__name__
lowercase = infer_framework(snake_case__ )
if framework == "tf":
lowercase = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
lowercase = inspect.signature(model_class.forward ) # PyTorch models
else:
lowercase = inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def UpperCamelCase ( d , parent_key = "" , delimiter = "." ):
    """Flatten a nested mapping into a flat dict with delimiter-joined keys.

    Fixed: all three parameters were mangled to the same name (a SyntaxError)
    and the recursive call targeted an unresolved name.
    """

    def _flatten_dict(d , parent_key="" , delimiter="." ):
        # Generator yielding (flat_key, value) pairs; recursion goes through the
        # public entry point so nested mappings flatten identically.
        for k, v in d.items():
            key = str(parent_key ) + delimiter + str(k ) if parent_key else k
            if v and isinstance(v , MutableMapping ):
                yield from UpperCamelCase(v , key , delimiter=delimiter ).items()
            else:
                yield key, v

    return dict(_flatten_dict(d , parent_key , delimiter ) )
@contextmanager
def UpperCamelCase ( working_dir , use_temp_dir = False ):
    """Yield a fresh temporary directory when `use_temp_dir`, else `working_dir`.

    Fixed: both parameters were mangled to the same name (a SyntaxError) and
    the body referenced the resulting undefined names.
    """
    if use_temp_dir:
        # The temp dir (and its contents) is removed when the block exits.
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__=None ):
'''simple docstring'''
if is_numpy_array(snake_case__ ):
return np.transpose(snake_case__ , axes=snake_case__ )
elif is_torch_tensor(snake_case__ ):
return array.T if axes is None else array.permute(*snake_case__ )
elif is_tf_tensor(snake_case__ ):
import tensorflow as tf
return tf.transpose(snake_case__ , perm=snake_case__ )
elif is_jax_tensor(snake_case__ ):
return jnp.transpose(snake_case__ , axes=snake_case__ )
else:
raise ValueError(f'Type not supported for transpose: {type(snake_case__ )}.' )
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
if is_numpy_array(snake_case__ ):
return np.reshape(snake_case__ , snake_case__ )
elif is_torch_tensor(snake_case__ ):
return array.reshape(*snake_case__ )
elif is_tf_tensor(snake_case__ ):
import tensorflow as tf
return tf.reshape(snake_case__ , snake_case__ )
elif is_jax_tensor(snake_case__ ):
return jnp.reshape(snake_case__ , snake_case__ )
else:
raise ValueError(f'Type not supported for reshape: {type(snake_case__ )}.' )
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__=None ):
'''simple docstring'''
if is_numpy_array(snake_case__ ):
return np.squeeze(snake_case__ , axis=snake_case__ )
elif is_torch_tensor(snake_case__ ):
return array.squeeze() if axis is None else array.squeeze(dim=snake_case__ )
elif is_tf_tensor(snake_case__ ):
import tensorflow as tf
return tf.squeeze(snake_case__ , axis=snake_case__ )
elif is_jax_tensor(snake_case__ ):
return jnp.squeeze(snake_case__ , axis=snake_case__ )
else:
raise ValueError(f'Type not supported for squeeze: {type(snake_case__ )}.' )
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
if is_numpy_array(snake_case__ ):
return np.expand_dims(snake_case__ , snake_case__ )
elif is_torch_tensor(snake_case__ ):
return array.unsqueeze(dim=snake_case__ )
elif is_tf_tensor(snake_case__ ):
import tensorflow as tf
return tf.expand_dims(snake_case__ , axis=snake_case__ )
elif is_jax_tensor(snake_case__ ):
return jnp.expand_dims(snake_case__ , axis=snake_case__ )
else:
raise ValueError(f'Type not supported for expand_dims: {type(snake_case__ )}.' )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
if is_numpy_array(snake_case__ ):
return np.size(snake_case__ )
elif is_torch_tensor(snake_case__ ):
return array.numel()
elif is_tf_tensor(snake_case__ ):
import tensorflow as tf
return tf.size(snake_case__ )
elif is_jax_tensor(snake_case__ ):
return array.size
else:
raise ValueError(f'Type not supported for expand_dims: {type(snake_case__ )}.' )
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
for key, value in auto_map.items():
if isinstance(snake_case__ , (tuple, list) ):
lowercase = [f'{repo_id}--{v}' if (v is not None and '''--''' not in v) else v for v in value]
elif value is not None and "--" not in value:
lowercase = f'{repo_id}--{value}'
return auto_map
def UpperCamelCase ( lowerCAmelCase__ ):
    """Infer 'tf', 'pt' or 'flax' from a model class's MRO module names.

    Fixed: the loop iterated over the undefined `snake_case_` instead of the
    parameter. Raises TypeError when no framework matches (for/else).
    """
    for base_class in inspect.getmro(lowerCAmelCase__ ):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith('''tensorflow''' ) or module.startswith('''keras''' ) or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith('''torch''' ) or name == "PreTrainedModel":
            return "pt"
        elif module.startswith('''flax''' ) or module.startswith('''jax''' ) or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f'Could not infer framework from class {lowerCAmelCase__}.' )
| 700 |
def UpperCamelCase ( input_a , input_b ):
    """Logical AND gate: return 1 iff both inputs are truthy (non-zero).

    Fixed: both parameters were mangled to the same name (a SyntaxError) and
    the body referenced the undefined `input_a` twice.
    """
    # The tuple contains a 0 exactly when at least one input is 0/False.
    return int((input_a, input_b).count(0 ) == 0 )
def UpperCamelCase ( ):
    """Exhaustively check the AND-gate truth table.

    NOTE(review): `and_gate` is unresolved here — the gate defined above was
    renamed to `UpperCamelCase`, which this definition itself shadows; restore
    the distinct upstream names before relying on these asserts.
    """
    assert and_gate(0 , 0 ) == 0
    assert and_gate(0 , 1 ) == 0
    assert and_gate(1 , 0 ) == 0
    assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 633 | 0 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCamelCase ( tf_checkpoint_path , mobilebert_config_file , pytorch_dump_path ):
    """Convert a TF MobileBERT checkpoint to a PyTorch state-dict file.

    Fixed: all three parameters were mangled to the same name (a SyntaxError)
    and the body referenced the undefined `__SCREAMING_SNAKE_CASE`.
    """
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file )
    print(f'Building PyTorch model from configuration: {config}' )
    model = MobileBertForPreTraining(config )
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
lowercase__ :Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--mobilebert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained MobileBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowercase__ :Optional[int] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 701 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-module wiring for BioGPT: real submodules are imported on first access.
# Fixed: the structure dict and the _LazyModule result were both assigned to
# `lowercase__` while `_import_structure` (read below) stayed undefined, and the
# modeling symbols were never registered in the structure.
_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports…
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )

else:
    import sys

    # …while at runtime the module is replaced by a lazy proxy.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 633 | 0 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
lowercase__ :Optional[Any] = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def UpperCamelCase ( lowerCAmelCase__ ):
    """Map a pegasus TF weight name to its transformers state-dict key.

    Applies each (pegasus_name, hf_name) substitution from PATTERNS in order.
    Fixed: the loop previously rebound the undefined name `k`.
    """
    for pegasus_name, hf_name in PATTERNS:
        lowerCAmelCase__ = lowerCAmelCase__.replace(pegasus_name , hf_name )
    return lowerCAmelCase__
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
lowercase = DEFAULTS.copy()
cfg_kwargs.update(lowerCAmelCase__ )
lowercase = PegasusConfig(**lowerCAmelCase__ )
lowercase = PegasusForConditionalGeneration(lowerCAmelCase__ )
lowercase = torch_model.model.state_dict()
lowercase = {}
for k, v in tf_weights.items():
lowercase = rename_state_dict_key(lowerCAmelCase__ )
if new_k not in sd:
raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
if "dense" in k or "proj" in new_k:
lowercase = v.T
lowercase = torch.tensor(lowerCAmelCase__ , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, f'{new_k}, {k}, {v.shape}, {sd[new_k].shape}'
# make sure embedding.padding_idx is respected
lowercase = torch.zeros_like(mapping['''shared.weight'''][cfg.pad_token_id + 1] )
lowercase = mapping['''shared.weight''']
lowercase = mapping['''shared.weight''']
lowercase = {k: torch.zeros_like(lowerCAmelCase__ ) for k, v in sd.items() if k.endswith('''bias''' ) and k not in mapping}
mapping.update(**lowerCAmelCase__ )
lowercase , lowercase = torch_model.model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ )
lowercase = [
k for k in missing if k not in ['''encoder.embed_positions.weight''', '''decoder.embed_positions.weight''']
]
assert unexpected_missing == [], f'no matches found for the following torch keys {unexpected_missing}'
assert extra == [], f'no matches found for the following tf keys {extra}'
return torch_model
def UpperCamelCase ( lowerCAmelCase__="./ckpt/aeslc/model.ckpt-32000" ):
    """Load all model variables of a TF checkpoint into a {name: ndarray} dict.

    Fixed: the loop iterated over the checkpoint *path* instead of the listed
    variables, `skip_key` was never bound, and `load_variable` was called with
    the path twice instead of (path, variable_name).
    """
    init_vars = tf.train.list_variables(lowerCAmelCase__ )
    tf_weights = {}
    # Optimizer slots and the step counter are not model weights.
    ignore_name = ['''Adafactor''', '''global_step''']
    for name, shape in tqdm(init_vars , desc='''converting tf checkpoint to dict''' ):
        skip_key = any(pat in name for pat in ignore_name )
        if skip_key:
            continue
        array = tf.train.load_variable(lowerCAmelCase__ , name )
        tf_weights[name] = array
    return tf_weights
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
lowercase = Path(lowerCAmelCase__ ).parent.name
lowercase = task_specific_params[f'summarization_{dataset}']['''max_position_embeddings''']
lowercase = PegasusTokenizer.from_pretrained('''sshleifer/pegasus''' , model_max_length=lowerCAmelCase__ )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(lowerCAmelCase__ )
# convert model
lowercase = get_tf_weights_as_numpy(lowerCAmelCase__ )
lowercase = task_specific_params[f'summarization_{dataset}']
if dataset == "large":
lowercase = task_specific_params
lowercase = convert_pegasus(lowerCAmelCase__ , lowerCAmelCase__ )
torch_model.save_pretrained(lowerCAmelCase__ )
lowercase = torch_model.state_dict()
sd.pop('''model.decoder.embed_positions.weight''' )
sd.pop('''model.encoder.embed_positions.weight''' )
torch.save(lowerCAmelCase__ , Path(lowerCAmelCase__ ) / '''pytorch_model.bin''' )
if __name__ == "__main__":
lowercase__ :List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
lowercase__ :Dict = parser.parse_args()
if args.save_dir is None:
lowercase__ :Dict = Path(args.tf_ckpt_path).parent.name
lowercase__ :Optional[Any] = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 702 |
import logging
from transformers import PretrainedConfig
# Fixed: the logger and the pretrained-config URL map were both assigned to the
# clobbering name `lowercase__` (with bogus annotations).
logger = logging.getLogger(__name__)

# Shortcut name -> remote config URL for pretrained BertAbs checkpoints.
BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}
class lowercase ( SCREAMING_SNAKE_CASE__ ):
    """Configuration for the BertAbs abstractive-summarization model.

    Fixed: all twelve __init__ parameters were mangled to `A__` (duplicate
    parameter names — a SyntaxError) and the attributes were assigned to the
    throwaway local `lowercase` instead of `self`.
    """

    # model_type identifier (the original `Optional[int]` annotation referenced
    # an unimported name)
    lowercase_ = '''bertabs'''

    def __init__(
        self ,
        vocab_size=3_0_5_2_2 ,
        max_pos=5_1_2 ,
        enc_layers=6 ,
        enc_hidden_size=5_1_2 ,
        enc_heads=8 ,
        enc_ff_size=5_1_2 ,
        enc_dropout=0.2 ,
        dec_layers=6 ,
        dec_hidden_size=7_6_8 ,
        dec_heads=8 ,
        dec_ff_size=2_0_4_8 ,
        dec_dropout=0.2 ,
        **kwargs ,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        # Encoder hyper-parameters.
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        # Decoder hyper-parameters.
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 633 | 0 |
from string import ascii_uppercase
# Digit characters for values 10..35 ("A".."Z"), keyed by their decimal value
# as a string. (Fixed: was assigned to `lowercase__` while the function below
# reads ALPHABET_VALUES.)
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}


def UpperCamelCase ( num , base ):
    """Convert non-negative int `num` to its string representation in `base` (2..36).

    Fixed: both parameters were mangled to the same name (a SyntaxError) and
    the body mixed undefined names (`num`, `base`, `div`, `mod`) with clobbered
    `lowercase` assignments.
    """
    if isinstance(num , float ):
        raise TypeError('''int() can\'t convert non-string with explicit base''' )
    if num < 0:
        raise ValueError('''parameter must be positive int''' )
    if isinstance(base , str ):
        raise TypeError('''\'str\' object cannot be interpreted as an integer''' )
    if isinstance(base , float ):
        raise TypeError('''\'float\' object cannot be interpreted as an integer''' )
    if base in (0, 1):
        raise ValueError('''base must be >= 2''' )
    if base > 36:
        raise ValueError('''base must be <= 36''' )
    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num , base )
        if base >= 11 and 9 < mod < 36:
            # Digits above 9 use letters.
            actual_value = ALPHABET_VALUES[str(mod )]
        else:
            actual_value = str(mod )
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1] )
        elif div == 1:
            new_value += str(num )
            return str(new_value[::-1] )
    return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
| 703 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"The `inpainting.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionInpaintPipeline` instead."
)
| 633 | 0 |
from math import factorial


def UpperCamelCase(n, k):
    """Return C(n, k): the number of ways to choose *k* items from *n*.

    Raises:
        ValueError: if ``k`` is negative or greater than ``n``.
    """
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    # n! / (k! * (n - k)!) — integer division is exact here.
    return factorial(n) // (factorial(k) * factorial(n - k))


combinations = UpperCamelCase  # name used by the demo below


if __name__ == "__main__":
    print(
        "The number of five-card hands possible from a standard",
        f"fifty-two card deck is: {combinations(52, 5)}\n",
    )

    print(
        "If a class of 40 students must be arranged into groups of",
        f"4 for group projects, there are {combinations(40, 4)} ways",
        "to arrange them.\n",
    )

    print(
        "If 10 teams are competing in a Formula One race, there",
        f"are {combinations(10, 3)} ways that first, second and",
        "third place can be awarded.",
    )
| 704 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
lowercase__ :Optional[Any] = "0.12" # assumed parallelism: 8
if is_torch_available():
import torch
def UpperCamelCase(shape, vocab_size, rng=None):
    """Create a random int32 token array of *shape* with ids in [0, vocab_size).

    Args:
        shape: iterable of dimension sizes.
        vocab_size: exclusive upper bound for the sampled token ids.
        rng: optional ``random.Random`` for reproducibility.
    """
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    # `jnp.intaa` in the original was a mangled `jnp.int32`.
    output = np.array(values, dtype=jnp.int32).reshape(shape)

    return output


ids_tensor = UpperCamelCase  # name used by the helpers below
def UpperCamelCase(shape, rng=None):
    """Random 0/1 attention mask of *shape*.

    The last column is forced to 1 so every batch row attends to at
    least one token.
    """
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class lowercase :
    """Generation smoke-test mixin for Flax generative models: greedy, sampling,
    beam search, `jit` equivalence, attention-mask padding, PT/Flax cross-checks.

    NOTE(review): locals in this class were machine-renamed to ``lowercase`` and
    repeatedly clobber one another, while later statements still read the
    original names (``inputs``, ``config``, ``input_ids``, ``attention_mask``,
    ``model`` ...).  As written most methods raise NameError; the comments below
    describe apparent intent only — reconcile with the upstream `transformers`
    generation tests before relying on behavior.
    """

    # Provided by concrete subclasses: presumably the model tester instance and
    # the tuple of generative model classes under test — TODO confirm.
    lowercase_ : Any =None
    lowercase_ : List[str] =()

    def A__ ( self):
        # Build a small (config, input_ids, attention_mask, max_length) setup
        # shared by all the generation tests below.
        lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        # cut to half length & take max batch_size 3
        lowercase = 2
        lowercase = inputs['''input_ids'''].shape[-1] // 2
        lowercase = inputs['''input_ids'''][:max_batch_size, :sequence_length]
        lowercase = jnp.ones_like(A__)
        lowercase = attention_mask[:max_batch_size, :sequence_length]
        # generate max 5 tokens
        lowercase = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            lowercase = config.eos_token_id
        return config, input_ids, attention_mask, max_length

    @is_pt_flax_cross_test
    def A__ ( self):
        # Greedy generation should match between the Flax model and its
        # PyTorch counterpart loaded from the same weights.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        lowercase = False
        lowercase = max_length
        lowercase = 0
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            lowercase = getattr(A__ ,A__)
            lowercase = pt_model_class(A__).eval()
            lowercase = load_flax_weights_in_pytorch_model(A__ ,flax_model.params)
            lowercase = flax_model.generate(A__).sequences
            lowercase = pt_model.generate(torch.tensor(A__ ,dtype=torch.long))
            # Flax may generate a few extra tokens; truncate before comparing.
            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                lowercase = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
            self.assertListEqual(pt_generation_outputs.numpy().tolist() ,flax_generation_outputs.tolist())

    def A__ ( self):
        # Greedy (do_sample=False) generation, plus eager vs `jit` equivalence.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        lowercase = False
        lowercase = max_length
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__).sequences
            self.assertEqual(generation_outputs.shape[-1] ,A__)
            lowercase = jit(model.generate)
            lowercase = jit_generate(A__).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())

    def A__ ( self):
        # Sampling (do_sample=True) generation, plus eager vs `jit` equivalence.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        lowercase = True
        lowercase = max_length
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__).sequences
            self.assertEqual(generation_outputs.shape[-1] ,A__)
            lowercase = jit(model.generate)
            lowercase = jit_generate(A__).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())

    def A__ ( self):
        # Beam-search variant (the bare `2` presumably sets num_beams — TODO confirm).
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        lowercase = False
        lowercase = max_length
        lowercase = 2
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__).sequences
            self.assertEqual(generation_outputs.shape[-1] ,A__)
            lowercase = jit(model.generate)
            lowercase = jit_generate(A__).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())

    def A__ ( self):
        # Multiple return sequences: output batch should scale by
        # config.num_return_sequences.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        lowercase = False
        lowercase = max_length
        lowercase = 2
        lowercase = 2
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__).sequences
            self.assertEqual(generation_outputs.shape[0] ,input_ids.shape[0] * config.num_return_sequences)

    def A__ ( self):
        # Sampling with extra decoding knobs (temperature/top-k/top-p style
        # values — exact mapping lost in renaming; confirm upstream).
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        lowercase = True
        lowercase = max_length
        lowercase = 0.8
        lowercase = 1_0
        lowercase = 0.3
        lowercase = 1
        lowercase = 8
        lowercase = 9
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__).sequences
            self.assertEqual(generation_outputs.shape[-1] ,A__)
            lowercase = jit(model.generate)
            lowercase = jit_generate(A__).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())

    def A__ ( self):
        # Greedy generation with additional token-id settings (values 1/8/9;
        # original parameter names lost — confirm upstream).
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        lowercase = max_length
        lowercase = 1
        lowercase = 8
        lowercase = 9
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__).sequences
            self.assertEqual(generation_outputs.shape[-1] ,A__)
            lowercase = jit(model.generate)
            lowercase = jit_generate(A__).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())

    def A__ ( self):
        # Same as above but with an extra `2` setting (presumably num_beams).
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        lowercase = max_length
        lowercase = 2
        lowercase = 1
        lowercase = 8
        lowercase = 9
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__).sequences
            self.assertEqual(generation_outputs.shape[-1] ,A__)
            lowercase = jit(model.generate)
            lowercase = jit_generate(A__).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())

    def A__ ( self):
        # Greedy generation with a zeroed-out (left-padded) attention position.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        # pad attention mask on the left
        lowercase = attention_mask.at[(0, 0)].set(0)
        lowercase = False
        lowercase = max_length
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__ ,attention_mask=A__).sequences
            self.assertEqual(generation_outputs.shape[-1] ,A__)
            lowercase = jit(model.generate)
            lowercase = jit_generate(A__ ,attention_mask=A__).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())

    def A__ ( self):
        # Sampling generation with a zeroed-out (left-padded) attention position.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        # pad attention mask on the left
        lowercase = attention_mask.at[(0, 0)].set(0)
        lowercase = True
        lowercase = max_length
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__ ,attention_mask=A__).sequences
            self.assertEqual(generation_outputs.shape[-1] ,A__)
            lowercase = jit(model.generate)
            lowercase = jit_generate(A__ ,attention_mask=A__).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())

    def A__ ( self):
        # Beam-search generation with a zeroed-out (left-padded) attention position.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        # pad attention mask on the left
        lowercase = attention_mask.at[(0, 0)].set(0)
        lowercase = 2
        lowercase = max_length
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__ ,attention_mask=A__).sequences
            self.assertEqual(generation_outputs.shape[-1] ,A__)
            lowercase = jit(model.generate)
            lowercase = jit_generate(A__ ,attention_mask=A__).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
@require_flax
class lowercase ( unittest.TestCase ):
    """Integration test: `generate()` must reject unknown keyword arguments.

    NOTE(review): locals were renamed to ``lowercase`` while later statements
    read the original names (``tokenizer``, ``model``); confirm against the
    upstream transformers test before relying on this code running.
    """

    def A__ ( self):
        lowercase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-bert''')
        lowercase = FlaxAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''')
        lowercase = '''Hello world'''
        lowercase = tokenizer(A__ ,return_tensors='''np''').input_ids
        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(A__ ,'''do_samples'''):
            model.generate(A__ ,do_samples=A__)
        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(A__ ,'''foo'''):
            lowercase = {'''foo''': '''bar'''}
            model.generate(A__ ,**A__)
| 633 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
lowercase__ :Tuple = None
# NOTE(review): every constant below rebinds the same name ``lowercase__`` —
# downstream code that expects VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES will not find those names here.
lowercase__ :str = logging.get_logger(__name__)

# File names expected inside a saved tokenizer directory.
lowercase__ :List[str] = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

# Hosted vocabulary / tokenizer files for the canonical BARThez checkpoints.
lowercase__ :Optional[int] = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum model input sizes (in tokens) per checkpoint.
lowercase__ :Any = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

# SentencePiece sub-word boundary marker.
lowercase__ :Tuple = "▁"
class lowercase ( PreTrainedTokenizerFast ):
    """Fast (tokenizers-backed) BARThez tokenizer built on SentencePiece BPE.

    Fixes vs. the previous revision: the base class was the undefined name
    ``__SCREAMING_SNAKE_CASE`` (should be the imported PreTrainedTokenizerFast),
    ``__init__`` had duplicate parameter names (a SyntaxError), and every call
    used the undefined placeholder ``_a``.
    NOTE: the class attributes below still reference module-level names
    (VOCAB_FILES_NAMES, ...) that this file binds to ``lowercase__`` instead.
    """

    lowercase_ : Any =VOCAB_FILES_NAMES
    lowercase_ : List[str] =PRETRAINED_VOCAB_FILES_MAP
    lowercase_ : Tuple =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowercase_ : Union[str, Any] =['''input_ids''', '''attention_mask''']
    lowercase_ : Optional[Any] =BarthezTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        # The mask token behaves like a normal word, i.e. it absorbs the space
        # before it (lstrip=True).
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.vocab_file = vocab_file
        # Saving the slow tokenizer requires the sentencepiece model file.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def A__ ( self, token_ids_a, token_ids_b=None):
        """Add BARThez special tokens: ``<s> A </s>`` or ``<s> A </s></s> B </s>``."""
        if token_ids_b is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_b + sep

    def A__ ( self, token_ids_a, token_ids_b=None):
        """Return all-zero token-type ids sized to the special-token layout."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep) * [0]

    def A__ ( self, save_directory, filename_prefix=None):
        """Copy the sentencepiece vocab into *save_directory*; return its path tuple."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''')
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        # Only copy when the target differs from the source file.
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 705 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowercase ( SCREAMING_SNAKE_CASE__ ):
    """Test-suite for diffusers' UniPCMultistepScheduler.

    NOTE(review): the base name ``SCREAMING_SNAKE_CASE__`` is undefined in this
    file — presumably the imported SchedulerCommonTest.  Locals throughout were
    machine-renamed to ``lowercase`` and clobber each other while later lines
    still read the original names (``config``, ``scheduler``, ``sample`` ...);
    comments below describe apparent intent only.
    """

    # Scheduler classes under test and default forward kwargs.
    lowercase_ : List[str] =(UniPCMultistepScheduler,)
    lowercase_ : Tuple =(('''num_inference_steps''', 25),)

    def A__ ( self ,**A__):
        # Default scheduler configuration; overridable via keyword arguments.
        lowercase = {
            '''num_train_timesteps''': 1_0_0_0,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''solver_order''': 2,
            '''solver_type''': '''bh2''',
        }
        config.update(**A__)
        return config

    def A__ ( self ,A__=0 ,**A__):
        # Save/restore round-trip: stepping a reloaded scheduler (with the past
        # residuals copied over) must reproduce the original outputs.
        lowercase = dict(self.forward_default_kwargs)
        lowercase = kwargs.pop('''num_inference_steps''' ,A__)
        lowercase = self.dummy_sample
        lowercase = 0.1 * sample
        lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            lowercase = self.get_scheduler_config(**A__)
            lowercase = scheduler_class(**A__)
            scheduler.set_timesteps(A__)
            # copy over dummy past residuals
            lowercase = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(A__)
                lowercase = scheduler_class.from_pretrained(A__)
                new_scheduler.set_timesteps(A__)
                # copy over dummy past residuals
                lowercase = dummy_past_residuals[: new_scheduler.config.solver_order]
            lowercase , lowercase = sample, sample
            for t in range(A__ ,time_step + scheduler.config.solver_order + 1):
                lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
                lowercase = new_scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
                assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"

    def A__ ( self ,A__=0 ,**A__):
        # Same round-trip check but with the default (unmodified) config.
        lowercase = dict(self.forward_default_kwargs)
        lowercase = kwargs.pop('''num_inference_steps''' ,A__)
        lowercase = self.dummy_sample
        lowercase = 0.1 * sample
        lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            lowercase = self.get_scheduler_config()
            lowercase = scheduler_class(**A__)
            scheduler.set_timesteps(A__)
            # copy over dummy past residuals (must be after setting timesteps)
            lowercase = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(A__)
                lowercase = scheduler_class.from_pretrained(A__)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(A__)
                # copy over dummy past residual (must be after setting timesteps)
                lowercase = dummy_past_residuals[: new_scheduler.config.solver_order]
            lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
            lowercase = new_scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"

    def A__ ( self ,A__=None ,**A__):
        # Run a full 10-step denoising loop with a dummy model and return the
        # final sample.
        if scheduler is None:
            lowercase = self.scheduler_classes[0]
            lowercase = self.get_scheduler_config(**A__)
            lowercase = scheduler_class(**A__)
        lowercase = self.scheduler_classes[0]
        lowercase = self.get_scheduler_config(**A__)
        lowercase = scheduler_class(**A__)
        lowercase = 1_0
        lowercase = self.dummy_model()
        lowercase = self.dummy_sample_deter
        scheduler.set_timesteps(A__)
        for i, t in enumerate(scheduler.timesteps):
            lowercase = model(A__ ,A__)
            lowercase = scheduler.step(A__ ,A__ ,A__).prev_sample
        return sample

    def A__ ( self):
        # Two successive timesteps must produce outputs of the input's shape.
        lowercase = dict(self.forward_default_kwargs)
        lowercase = kwargs.pop('''num_inference_steps''' ,A__)
        for scheduler_class in self.scheduler_classes:
            lowercase = self.get_scheduler_config()
            lowercase = scheduler_class(**A__)
            lowercase = self.dummy_sample
            lowercase = 0.1 * sample
            if num_inference_steps is not None and hasattr(A__ ,'''set_timesteps'''):
                scheduler.set_timesteps(A__)
            elif num_inference_steps is not None and not hasattr(A__ ,'''set_timesteps'''):
                lowercase = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
            lowercase = dummy_past_residuals[: scheduler.config.solver_order]
            lowercase = scheduler.timesteps[5]
            lowercase = scheduler.timesteps[6]
            lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
            lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
            self.assertEqual(output_a.shape ,sample.shape)
            self.assertEqual(output_a.shape ,output_a.shape)

    def A__ ( self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        lowercase = UniPCMultistepScheduler(**self.get_scheduler_config())
        lowercase = self.full_loop(scheduler=A__)
        lowercase = torch.mean(torch.abs(A__))
        assert abs(result_mean.item() - 0.2464) < 1E-3
        lowercase = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        lowercase = DEISMultistepScheduler.from_config(scheduler.config)
        lowercase = DPMSolverMultistepScheduler.from_config(scheduler.config)
        lowercase = UniPCMultistepScheduler.from_config(scheduler.config)
        lowercase = self.full_loop(scheduler=A__)
        lowercase = torch.mean(torch.abs(A__))
        assert abs(result_mean.item() - 0.2464) < 1E-3

    def A__ ( self):
        # Sweep over several training-timestep counts.
        for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=A__)

    def A__ ( self):
        # Sweep thresholding / solver / prediction-type combinations.
        self.check_over_configs(thresholding=A__)
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=A__ ,prediction_type=A__ ,sample_max_value=A__ ,solver_order=A__ ,solver_type=A__ ,)

    def A__ ( self):
        # Both supported prediction types must be accepted.
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=A__)

    def A__ ( self):
        # Every solver type/order combination must produce finite samples.
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=A__ ,solver_type=A__ ,prediction_type=A__ ,)
                    lowercase = self.full_loop(
                        solver_order=A__ ,solver_type=A__ ,prediction_type=A__ ,)
                    assert not torch.isnan(A__).any(), "Samples have nan numbers"

    def A__ ( self):
        # lower_order_final toggle must be accepted in both states.
        self.check_over_configs(lower_order_final=A__)
        self.check_over_configs(lower_order_final=A__)

    def A__ ( self):
        # Sweep several inference-step counts.
        for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
            self.check_over_forward(num_inference_steps=A__ ,time_step=0)

    def A__ ( self):
        # Regression check on the mean magnitude of the default full loop.
        lowercase = self.full_loop()
        lowercase = torch.mean(torch.abs(A__))
        assert abs(result_mean.item() - 0.2464) < 1E-3

    def A__ ( self):
        # Regression check for v-prediction.
        lowercase = self.full_loop(prediction_type='''v_prediction''')
        lowercase = torch.mean(torch.abs(A__))
        assert abs(result_mean.item() - 0.1014) < 1E-3

    def A__ ( self):
        # fp16 inputs must stay fp16 through the whole loop.
        lowercase = self.scheduler_classes[0]
        lowercase = self.get_scheduler_config(thresholding=A__ ,dynamic_thresholding_ratio=0)
        lowercase = scheduler_class(**A__)
        lowercase = 1_0
        lowercase = self.dummy_model()
        lowercase = self.dummy_sample_deter.half()
        scheduler.set_timesteps(A__)
        for i, t in enumerate(scheduler.timesteps):
            lowercase = model(A__ ,A__)
            lowercase = scheduler.step(A__ ,A__ ,A__).prev_sample
        # NOTE(review): `torch.floataa` is a mangled dtype name (likely float16
        # given `.half()` above) — confirm against the upstream test.
        assert sample.dtype == torch.floataa

    def A__ ( self ,**A__):
        # Using every training timestep for inference must yield unique steps.
        for scheduler_class in self.scheduler_classes:
            lowercase = self.get_scheduler_config(**A__)
            lowercase = scheduler_class(**A__)
            scheduler.set_timesteps(scheduler.config.num_train_timesteps)
            assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
| 633 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy-import structure: maps submodule name -> public names it provides.
# (Previously the dict was bound to a throwaway name while `_LazyModule` was
# handed the undefined `_import_structure`, and the lazy proxy was never
# installed into sys.modules.)
_import_structure = {
    "configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is present: also expose the modeling classes lazily.
    _import_structure["modeling_lilt"] = [
        "LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LiltForQuestionAnswering",
        "LiltForSequenceClassification",
        "LiltForTokenClassification",
        "LiltModel",
        "LiltPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type-checkers see the real imports.
    from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_lilt import (
            LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
            LiltForQuestionAnswering,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltModel,
            LiltPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a proxy that imports on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 706 |
import argparse
from collections import defaultdict
import yaml
lowercase__ :Optional[int] = "docs/source/en/_toctree.yml"
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase = defaultdict(lowerCAmelCase__ )
for doc in model_doc:
counts[doc["local"]] += 1
lowercase = [key for key, value in counts.items() if value > 1]
lowercase = []
for duplicate_key in duplicates:
lowercase = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} )
if len(lowerCAmelCase__ ) > 1:
raise ValueError(
f'{duplicate_key} is present several times in the documentation table of content at '
'''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
'''others.''' )
# Only add this once
new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] )
# Sort
return sorted(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : s["title"].lower() )
def UpperCamelCase(overwrite=False):
    """Verify (and optionally rewrite) the sorting of the Models doc ToC.

    Args:
        overwrite: when True, fix the ToC file in place instead of raising.

    Raises:
        ValueError: if the ToC is mis-sorted and ``overwrite`` is False.
    """
    # (Previously this opened the boolean parameter as a file path.)
    with open(lowercase__, encoding='''utf-8''') as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['''sections''']

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]['''sections''']

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if '''sections''' in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc['''sections''']
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]['''sections'''] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]['''sections'''] = model_doc
            content[api_idx]['''sections'''] = api_doc
            with open(lowercase__, '''w''', encoding='''utf-8''') as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''')


check_model_doc = UpperCamelCase  # CLI entry-point name


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
    check_model_doc(args.fix_and_overwrite)
| 633 | 0 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class lowercase ( XLMRobertaConfig ):
    """Configuration for M-CLIP: an XLM-R text encoder plus a linear projection.

    Fixes vs. the previous revision: the base was the undefined name
    ``SCREAMING_SNAKE_CASE__`` (the imported XLMRobertaConfig is intended),
    ``__init__`` had duplicate parameter names (a SyntaxError), and the
    attribute assignments read undefined locals.
    """

    lowercase_ : Optional[Any] ='''M-CLIP'''  # model_type identifier

    def __init__( self ,transformerDimSize=1_0_2_4 ,imageDimSize=7_6_8 ,**kwargs):
        # Attribute names match what the model class below reads
        # (config.transformerDimensions / config.numDims).
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)
class lowercase ( PreTrainedModel ):
    """XLM-R text encoder with mean pooling and a linear projection (M-CLIP).

    Fixes vs. the previous revision: the base was the undefined name
    ``SCREAMING_SNAKE_CASE__`` (the imported PreTrainedModel is intended),
    ``__init__`` had duplicate parameter names (a SyntaxError), and the
    forward pass read undefined locals.
    """

    lowercase_ : Optional[Any] =MCLIPConfig  # config class used by from_pretrained

    def __init__( self ,config ,*args ,**kwargs):
        super().__init__(config ,*args ,**kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions ,out_features=config.numDims)

    def A__ ( self ,input_ids ,attention_mask):
        # Encode, mean-pool over non-padding positions, then project.
        embs = self.transformer(input_ids=input_ids ,attention_mask=attention_mask)[0]
        pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(pooled), pooled
| 707 |
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def UpperCamelCase(func, a, precision=10**-10):
    """Find a root of *func* near *a* via Newton-Raphson.

    Args:
        func: a Python expression in the variable ``x`` (e.g. ``"sin(x)"``);
            it is evaluated with ``eval`` against the local ``x``, so only
            pass trusted input.
        a: the starting guess.
        precision: iterate until ``|func(x)| < precision``.

    Returns:
        The root as a float.
    """
    # The eval'd expressions below read the local name ``x``.
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)


newton_raphson = UpperCamelCase  # name used by the demos below


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
    # Find root of polynomial
    print(f'The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}')
    # Find Square Root of 5
    print(f'The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}')
    # Exponential Roots
    print(f'The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}')
| 633 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
# Canonical ALBERT checkpoints mapped to their hosted configuration files.
lowercase__ :Union[str, Any] = {
    "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
    "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
    "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
    "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
    "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
    "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
    "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
    "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}
class lowercase ( PretrainedConfig ):
    """ALBERT model configuration.

    Fixes vs. the previous revision: the base was the undefined name
    ``_UpperCAmelCase`` (the imported PretrainedConfig is intended), the
    ``__init__`` signature repeated the name ``A__`` for every parameter
    (a SyntaxError), and all attribute assignments read undefined locals.
    Defaults are preserved from the original signature.
    """

    lowercase_ : Optional[Any] ='''albert'''  # model_type identifier

    def __init__(
        self,
        vocab_size=3_0_0_0_0,
        embedding_size=1_2_8,
        hidden_size=4_0_9_6,
        num_hidden_layers=1_2,
        num_hidden_groups=1,
        num_attention_heads=6_4,
        intermediate_size=1_6_3_8_4,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=5_1_2,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,**kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class lowercase ( OnnxConfig ):
    """ONNX export configuration for ALBERT: declares the dynamic input axes.

    Fixes vs. the previous revision: the base was the undefined name
    ``_UpperCAmelCase`` (the imported OnnxConfig is intended) and the axis
    dict was assigned to a throwaway local instead of ``dynamic_axis``.
    """

    @property
    def A__ ( self):
        # Multiple-choice inputs carry an extra `choice` axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
                ('''token_type_ids''', dynamic_axis),
            ])
| 708 |
from pathlib import Path
import numpy as np
from PIL import Image
def UpperCamelCase(rgb):
    """Convert an (H, W, 3) RGB array to grayscale using ITU-R 601 luma weights."""
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.29_89 * r + 0.58_70 * g + 0.11_40 * b


rgb_to_gray = UpperCamelCase  # name used by the demo at the bottom of the file
def UpperCamelCase(gray):
    """Threshold a grayscale image: True where 127 < value <= 255."""
    return (gray > 127) & (gray <= 255)


gray_to_binary = UpperCamelCase  # name used by the demo at the bottom of the file
def UpperCamelCase(image, kernel):
    """Morphological dilation of a binary *image* by *kernel*.

    A pixel becomes 1 if any kernel-weighted pixel in its neighborhood is set.
    """
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1))

    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image

    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output


dilation = UpperCamelCase  # name used by the demo at the bottom of the file
if __name__ == "__main__":
    # Demo: dilate the binarized grayscale version of the bundled Lena image.
    # NOTE(review): `dilation`, `gray_to_binary`, `rgb_to_gray`, `lena_path`,
    # `lena`, `structuring_element` and `output` are unbound here — the values
    # were assigned to ``lowercase__`` instead; as written this raises
    # NameError.  Confirm intended names against the function definitions above.
    # read original image
    lowercase__ :str = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lowercase__ :List[str] = np.array(Image.open(lena_path))
    # kernel to be applied
    lowercase__ :Union[str, Any] = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    lowercase__ :Optional[int] = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    lowercase__ :str = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
| 633 | 0 |
import sys
# The 1000-digit number from Project Euler problem 8, kept as a string so that
# windows of adjacent digits can be sliced directly.
lowercase__ :Any = (
    '''73167176531330624919225119674426574742355349194934'''
    '''96983520312774506326239578318016984801869478851843'''
    '''85861560789112949495459501737958331952853208805511'''
    '''12540698747158523863050715693290963295227443043557'''
    '''66896648950445244523161731856403098711121722383113'''
    '''62229893423380308135336276614282806444486645238749'''
    '''30358907296290491560440772390713810515859307960866'''
    '''70172427121883998797908792274921901699720888093776'''
    '''65727333001053367881220235421809751254540594752243'''
    '''52584907711670556013604839586446706324415722155397'''
    '''53697817977846174064955149290862569321978468622482'''
    '''83972241375657056057490261407972968652414535100474'''
    '''82166370484403199890008895243450658541227588666881'''
    '''16427171479924442928230863465674813919123162824586'''
    '''17866458359124566529476545682848912883142607690042'''
    '''24219022671055626321111109370544217506941658960408'''
    '''07198403850962455444362981230987879927244284909188'''
    '''84580156166097919133875499200524063689912560717606'''
    '''05886116467109405077541002256983155200055935729725'''
    '''71636269561882670428252483600823257530420752963450'''
)
def UpperCamelCase(s):
    """Return the product of the decimal digits of the string *s* (1 for empty)."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


str_eval = UpperCamelCase  # name used by the sliding-window search below
def UpperCamelCase(n=None):
    """Return the largest product of 13 adjacent digits in the digit string *n*.

    Defaults to the 1000-digit Project Euler number defined above (the
    previous default referenced the undefined name ``N``).
    """
    if n is None:
        n = lowercase__
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            # Next digit is no worse than the one leaving: slide the window.
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            # Window can't improve by sliding: score it and jump ahead.
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product


solution = UpperCamelCase  # name used by the demo below


if __name__ == "__main__":
    print(f'{solution() = }')
| 709 |
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def UpperCamelCase(state_dict):
    """Drop fairseq bookkeeping keys that have no equivalent in the HF model.

    Mutates *state_dict* in place; missing keys are ignored.
    """
    ignore_keys = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''decoder.output_projection.weight''',
        '''_float_tensor''',
        '''encoder.embed_positions._float_tensor''',
        '''decoder.embed_positions._float_tensor''',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


remove_ignore_keys_ = UpperCamelCase  # name used by the converter below
def UpperCamelCase(emb):
    """Wrap an embedding matrix as a bias-free Linear layer sharing its weights."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    # Share the embedding weights instead of copying them.
    lin_layer.weight.data = emb.weight.data
    return lin_layer


make_linear_from_emb = UpperCamelCase  # name used by the converter below
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Convert a fairseq M2M-100 ``model.pt`` at path ``lowerCAmelCase__`` into a
    ``MaMaaaForConditionalGeneration``.

    The key-stripping and LM-head construction are inlined here because the
    module-level helpers are defined under clashing mangled names.
    '''
    mam_aaa = torch.load(lowerCAmelCase__ , map_location='''cpu''' )
    args = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model''']
    state_dict = mam_aaa['''model''']
    # Drop fairseq bookkeeping tensors that have no transformers counterpart.
    for k in [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''decoder.output_projection.weight''',
        '''_float_tensor''',
        '''encoder.embed_positions._float_tensor''',
        '''decoder.embed_positions._float_tensor''',
    ]:
        state_dict.pop(k , None )
    vocab_size = state_dict['''encoder.embed_tokens.weight'''].shape[0]
    config = MaMaaaConfig(
        vocab_size=vocab_size , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , )
    # The shared embedding comes from the decoder's token embedding.
    state_dict['''shared.weight'''] = state_dict['''decoder.embed_tokens.weight''']
    model = MaMaaaForConditionalGeneration(config )
    # strict=False: keys stripped/renamed above are expected to be missing.
    model.model.load_state_dict(state_dict , strict=False )
    # Tie the LM head to the shared embedding weights (bias-free Linear).
    vocab, dim = model.model.shared.weight.shape
    lm_head = nn.Linear(vocab , dim , bias=False )
    lm_head.weight.data = model.model.shared.weight.data
    model.lm_head = lm_head
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    # The converter above is defined under the mangled name `UpperCamelCase`;
    # the original also had a stray `ß` appended to `args.fairseq_path`.
    model = UpperCamelCase(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 633 | 0 |
from __future__ import annotations
def UpperCamelCase ( nums , target ):
    '''Return indices ``[i, j]`` with ``nums[i] + nums[j] == target``.

    Classic two-pointer scan over a sorted sequence; returns ``[]`` when no
    pair sums to ``target``. The original declared two parameters with the
    same name (a SyntaxError) and referenced undefined locals.
    '''
    i = 0
    j = len(nums ) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): `two_pointer` does not exist in this module; the search
    # above is defined under the mangled name `UpperCamelCase`.
    print(F'{UpperCamelCase([2, 7, 11, 15], 9) = }')
| 710 |
from __future__ import annotations
from random import random
class lowercase :
    '''Treap node: a value, a random heap priority, and two child links.

    The original ``__init__``/``__str__`` bound locals named ``lowercase``
    instead of instance attributes, so every node was empty.
    '''

    def __init__( self ,A__ = None):
        self.value = A__
        # Random priority keeps the treap balanced in expectation.
        self.prior = random()
        self.left = None
        self.right = None

    def __repr__( self):
        from pprint import pformat

        if self.left is None and self.right is None:
            return f'\'{self.value}: {self.prior:.5}\''
        else:
            return pformat(
                {f'{self.value}: {self.prior:.5}': (self.left, self.right)} ,indent=1)

    def __str__( self):
        # Pre-order rendering: value, then left subtree, then right subtree.
        value = str(self.value) + ''' '''
        left = str(self.left or '''''')
        right = str(self.right or '''''')
        return value + left + right
def UpperCamelCase ( root , value ):
    '''Split treap ``root`` into ``(left, right)``: values <= ``value`` on the
    left, values > ``value`` on the right.

    The original declared both parameters with the same name (a SyntaxError);
    self-recursion uses this function's own (mangled) name.
    '''
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = UpperCamelCase(root.left , value )
            return left, root
        else:
            root.right, right = UpperCamelCase(root.right , value )
            return root, right
def UpperCamelCase ( left , right ):
    '''Merge treaps ``left`` and ``right`` (all of ``left`` <= all of ``right``)
    and return the merged root, preserving the heap property on ``prior``.
    '''
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = UpperCamelCase(left.right , right )
        return left
    else:
        right.left = UpperCamelCase(left , right.left )
        return right
def UpperCamelCase ( root , value ):
    '''Insert ``value`` into treap ``root`` and return the new root.

    NOTE(review): relies on the node class and the module-level ``split``/
    ``merge`` helpers; in this file those are defined under mangled names —
    confirm the references resolve before use.
    '''
    node = lowercase(value )
    left, right = split(root , value )
    return merge(merge(left , node ) , right )
def UpperCamelCase ( root , value ):
    '''Erase every node holding ``value`` from treap ``root``; return the new root.

    Splits out the band (value-1, value] and merges the remainder back.
    NOTE(review): ``split``/``merge`` are defined above under mangled names —
    confirm the references resolve before use.
    '''
    left, right = split(root , value - 1 )
    _, right = split(right , value )
    return merge(left , right )
def UpperCamelCase ( root ):
    '''Print the treap's values in sorted (in-order) order, comma-separated.'''
    if not root:  # None
        return
    else:
        UpperCamelCase(root.left )
        print(root.value , end=''',''' )
        UpperCamelCase(root.right )
def UpperCamelCase ( root , args ):
    '''Apply a whitespace-separated command string to treap ``root``.

    ``+N`` inserts N, ``-N`` erases N; anything else prints a warning.
    NOTE(review): ``insert``/``erase`` are defined above under mangled names —
    confirm the references resolve before use.
    '''
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root , int(arg[1:] ) )
        elif arg[0] == "-":
            root = erase(root , int(arg[1:] ) )
        else:
            print('''Unknown command''' )
    return root
def UpperCamelCase ( ):
    '''Interactive treap REPL: read command strings until the user enters "q".

    NOTE(review): ``interact_treap`` is defined above under a mangled name —
    confirm the reference resolves before use.
    '''
    root = None
    print(
        '''enter numbers to create a tree, + value to add value into treap, '''
        '''- value to erase all nodes with value. \'q\' to quit. ''' )
    args = input()
    while args != "q":
        root = interact_treap(root , args )
        print(root )
        args = input()
    print('''good by!''' )
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): `main` does not exist in this module; the interactive
    # entry point above is defined under the mangled name `UpperCamelCase`.
    UpperCamelCase()
| 633 | 0 |
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
# Repo id, its expected cache folder, and the full hash of its `main` commit.
# The original bound all three values to the same name (`lowercase__`),
# clobbering the first two; distinct names restore them.  The method bodies
# referenced a single undefined `UpperCAmelCase_` everywhere — the arguments
# below follow the only constants that make each call meaningful.
RANDOM_BERT = "hf-internal-testing/tiny-random-bert"
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
FULL_COMMIT = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
lowercase__ :str = FULL_COMMIT  # keep the name the original left behind
class lowercase ( unittest.TestCase ):
    '''Network-backed tests for `cached_file`, `has_file` and `get_file_from_repo`.'''

    def test_cached_file( self):
        archive_file = cached_file(RANDOM_BERT ,CONFIG_NAME)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR ,subfolder)))
        with open(os.path.join(CACHE_DIR ,'''refs''' ,'''main''')) as f:
            main_commit = f.read()
        self.assertEqual(archive_file ,os.path.join(CACHE_DIR ,'''snapshots''' ,main_commit ,CONFIG_NAME))
        self.assertTrue(os.path.isfile(archive_file))
        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT ,CONFIG_NAME)
        self.assertEqual(archive_file ,new_archive_file)
        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT ,CONFIG_NAME ,revision='''9b8c223''')
        self.assertEqual(archive_file ,os.path.join(CACHE_DIR ,'''snapshots''' ,FULL_COMMIT ,CONFIG_NAME))

    def test_cached_file_errors( self):
        with self.assertRaisesRegex(EnvironmentError ,'''is not a valid model identifier'''):
            _ = cached_file('''tiny-random-bert''' ,CONFIG_NAME)
        with self.assertRaisesRegex(EnvironmentError ,'''is not a valid git identifier'''):
            _ = cached_file(RANDOM_BERT ,CONFIG_NAME ,revision='''aaaa''')
        with self.assertRaisesRegex(EnvironmentError ,'''does not appear to have a file named'''):
            _ = cached_file(RANDOM_BERT ,'''conf''')

    def test_non_existence_is_cached( self):
        with self.assertRaisesRegex(EnvironmentError ,'''does not appear to have a file named'''):
            _ = cached_file(RANDOM_BERT ,'''conf''')
        with open(os.path.join(CACHE_DIR ,'''refs''' ,'''main''')) as f:
            main_commit = f.read()
        # The miss itself must be cached under `.no_exist`.
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR ,'''.no_exist''' ,main_commit ,'''conf''')))
        path = cached_file(RANDOM_BERT ,'''conf''' ,_raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)
        path = cached_file(RANDOM_BERT ,'''conf''' ,local_files_only=True ,_raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)
        response_mock = mock.Mock()
        response_mock.status_code = 5_0_0
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('''requests.Session.request''' ,return_value=response_mock) as mock_head:
            path = cached_file(RANDOM_BERT ,'''conf''' ,_raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_has_file( self):
        self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' ,WEIGHTS_NAME))
        self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' ,TF2_WEIGHTS_NAME))
        self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' ,FLAX_WEIGHTS_NAME))

    def test_get_file_from_repo_distant( self):
        # `get_file_from_repo` returns None if the file does not exist
        self.assertIsNone(get_file_from_repo('''bert-base-cased''' ,'''ahah.txt'''))
        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError ,'''is not a valid model identifier'''):
            get_file_from_repo('''bert-base-case''' ,CONFIG_NAME)
        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError ,'''is not a valid git identifier'''):
            get_file_from_repo('''bert-base-cased''' ,CONFIG_NAME ,revision='''ahaha''')
        resolved_file = get_file_from_repo('''bert-base-cased''' ,CONFIG_NAME)
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file ,'''r''').read())
        self.assertEqual(config['''hidden_size'''] ,7_6_8)

    def test_get_file_from_repo_local( self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / '''a.txt'''
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir ,'''a.txt''') ,str(filename))
            self.assertIsNone(get_file_from_repo(tmp_dir ,'''b.txt'''))
| 711 |
def UpperCamelCase ( lowerCAmelCase__ = 1000 ):
'''simple docstring'''
lowercase = -1
lowercase = 0
for a in range(1 , n // 3 ):
# Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
lowercase = (n * n - 2 * a * n) // (2 * n - 2 * a)
lowercase = n - a - b
if c * c == (a * a + b * b):
lowercase = a * b * c
if candidate >= product:
lowercase = candidate
return product
if __name__ == "__main__":
print(F'{solution() = }')
| 633 | 0 |
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
# Parsed version of the installed torch distribution; baseline for the
# version comparisons below.
lowercase__ :Optional[Any] = parse(importlib.metadata.version("torch"))
def UpperCamelCase ( library_or_version , operation , requirement_version ):
    '''Compare an installed library's version (or a given Version) to a requirement.

    ``operation`` must be a key of ``STR_OPERATION_TO_FUNC`` (e.g. ">=").
    The original declared three parameters with the same name (a SyntaxError)
    and referenced an undefined ``_lowercase``.

    Raises:
        ValueError: if ``operation`` is not a supported comparison.
    '''
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f'`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}' )
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version , str ):
        # A library name was given: resolve its installed version first.
        library_or_version = parse(importlib.metadata.version(library_or_version ) )
    return operation(library_or_version , parse(requirement_version ) )
def UpperCamelCase ( operation , version ):
    '''True when the installed torch satisfies ``operation version`` (e.g. ">=", "1.12").

    NOTE(review): compares against the module-level parsed torch version,
    which this file binds to `lowercase__` above — confirm that binding is
    still live at call time; `compare_versions` is likewise defined above
    under a mangled name.
    '''
    return compare_versions(lowercase__ , operation , version )
| 712 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import table: module name -> public symbols.  The original bound this
# dict to `lowercase__` but referenced `_import_structure` below, and the
# torch-only branch clobbered the whole binding instead of adding a key.
_import_structure = {
    "configuration_instructblip": [
        "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InstructBlipConfig",
        "InstructBlipQFormerConfig",
        "InstructBlipVisionConfig",
    ],
    "processing_instructblip": ["InstructBlipProcessor"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch present: also expose the modeling objects.
    _import_structure["modeling_instructblip"] = [
        "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InstructBlipQFormerModel",
        "InstructBlipPreTrainedModel",
        "InstructBlipForConditionalGeneration",
        "InstructBlipVisionModel",
    ]
if TYPE_CHECKING:
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )
else:
    import sys

    lowercase__ :List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 633 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowercase ( unittest.TestCase ):
    '''Slow integration test: base TF-XLM-R hidden states on a tiny input.

    NOTE(review): the original referenced an undefined `_SCREAMING_SNAKE_CASE`
    for every local and used nonexistent dtypes `tf.intaa`/`tf.floataa`;
    `tf.int32`/`tf.float32` restore runnable code — confirm against upstream.
    '''

    @slow
    def test_output_embeds_base_model( self):
        model = TFXLMRobertaModel.from_pretrained('''jplu/tf-xlm-roberta-base''')
        features = {
            '''input_ids''': tf.convert_to_tensor([[0, 2_6_4_6, 1_0_2_6_9, 8_3, 9_9_9_4_2, 2]] ,dtype=tf.int32),  # "My dog is cute"
            '''attention_mask''': tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] ,dtype=tf.int32),
        }
        output = model(features)['''last_hidden_state''']
        expected_shape = tf.TensorShape((1, 6, 7_6_8))
        self.assertEqual(output.shape ,expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ] ,dtype=tf.float32 ,)
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1E-4))
| 713 |
from numpy import exp, pi, sqrt
def UpperCamelCase ( x , mu = 0.0 , sigma = 1.0 ):
    '''Gaussian probability density at ``x`` for mean ``mu`` and std dev ``sigma``.

    The original declared all three parameters with the same name (a
    SyntaxError) while the body used ``x``/``mu``/``sigma``.
    '''
    return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 633 | 0 |
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
lowercase__ :Union[str, Any] = logging.get_logger(__name__)
class lowercase ( PretrainedConfig ):
    '''Composite config holding an ``encoder`` and a ``decoder`` sub-config.

    NOTE(review): the original base class and both class attributes were bound
    to mangled/duplicate names; `PretrainedConfig`, `model_type` and
    `is_composition` follow this file's imports and the error message below —
    confirm against upstream.
    '''
    model_type ='vision-encoder-decoder'
    is_composition =True

    def __init__( self ,**kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f'A configuraton of type {self.model_type} cannot be instantiated because '
                f'not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}')
        encoder_config = kwargs.pop('''encoder''')
        encoder_model_type = encoder_config.pop('''model_type''')
        decoder_config = kwargs.pop('''decoder''')
        decoder_model_type = decoder_config.pop('''model_type''')
        self.encoder = AutoConfig.for_model(encoder_model_type ,**encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type ,**decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs( cls ,encoder_config ,decoder_config ,**kwargs):
        '''Build a composite config from two sub-configs, marking the decoder.'''
        logger.info('''Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''')
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict() ,decoder=decoder_config.to_dict() ,**kwargs)

    def to_dict( self):
        '''Serialize to a plain dict, expanding both sub-configs.'''
        output = copy.deepcopy(self.__dict__)
        output['''encoder'''] = self.encoder.to_dict()
        output['''decoder'''] = self.decoder.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
class lowercase ( OnnxConfig ):
    '''ONNX export config for the vision encoder (pixel_values -> last_hidden_state).

    NOTE(review): the original base class was an undefined mangled name and
    all three members shadowed each other under `A__`; the names restored
    here are the ones the `OnnxConfig` API consumes — confirm against upstream.
    '''
    torch_onnx_minimum_version =version.parse('''1.11''' )

    @property
    def inputs( self):
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ])

    @property
    def atol_for_validation( self):
        return 1E-4

    @property
    def outputs( self):
        return OrderedDict({'''last_hidden_state''': {0: '''batch''', 1: '''encoder_sequence'''}})
class lowercase ( OnnxConfig ):
    '''ONNX export config for the text decoder of a vision-encoder-decoder model.'''

    @property
    def inputs( self):
        # NOTE(review): the original rebound a single local for all three
        # entries, losing the keys; these names follow the dummy-input code
        # below — confirm against upstream.
        common_inputs = OrderedDict()
        common_inputs['''input_ids'''] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
        common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
        common_inputs['''encoder_hidden_states'''] = {0: '''batch''', 1: '''encoder_sequence'''}
        return common_inputs

    def generate_dummy_inputs( self ,tokenizer ,batch_size = -1 ,seq_length = -1 ,is_pair = False ,framework = None ,):
        '''Build dummy decoder inputs plus zeroed encoder hidden states.'''
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer ,batch_size=batch_size ,seq_length=seq_length ,is_pair=is_pair ,framework=framework)
        batch, encoder_sequence = dummy_input['''input_ids'''].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs['''input_ids'''] = dummy_input.pop('''input_ids''')
        common_inputs['''attention_mask'''] = dummy_input.pop('''attention_mask''')
        common_inputs['''encoder_hidden_states'''] = torch.zeros(encoder_hidden_states_shape)
        return common_inputs
class lowercase ( OnnxConfig ):
    '''Top-level ONNX config that dispatches to encoder/decoder sub-configs.

    NOTE(review): the original gave all three methods the same name `A__`
    (shadowing each other) and duplicated parameter names (a SyntaxError);
    the restored names follow the sub-config classes referenced in the
    bodies — confirm against upstream.
    '''

    @property
    def inputs( self):
        # The composite config exposes no direct inputs; use the sub-configs.
        pass

    def get_encoder_config( self ,encoder_config):
        '''ONNX config for the vision encoder.'''
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config( self ,encoder_config ,decoder_config ,feature = "default"):
        '''ONNX config for the decoder; records the encoder hidden size on it.'''
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config ,feature)
| 714 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def UpperCamelCase ( key , default=False ):
    '''Read the boolean environment variable ``key``; return ``default`` when unset.

    A set variable is interpreted with ``strtobool`` (returns 1/0).
    The original declared both parameters with the same name (a SyntaxError).

    Raises:
        ValueError: if the variable is set to something strtobool rejects.
    '''
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f'If set, {key} must be yes or no.' )
    return _value
# The env-flag reader above is defined under the mangled name `UpperCamelCase`
# (`parse_flag_from_env` does not exist here).  Bind the result under the
# upstream name too, because `slow()` below reads `_run_slow_tests`.
_run_slow_tests = UpperCamelCase("RUN_SLOW", default=False)
lowercase__ :Dict = _run_slow_tests
# ---- unittest skip-decorator helpers ------------------------------------
# Each helper takes a test case (function or class) and returns it wrapped
# in the appropriate `unittest.skip*` decorator.
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Unconditionally skip the decorated test.'''
    return unittest.skip('''Test was skipped''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Run only when slow tests are enabled via the RUN_SLOW env flag.

    NOTE(review): `_run_slow_tests` is not bound in this file as written
    (the flag above is assigned to a different name) — verify.
    '''
    return unittest.skipUnless(_run_slow_tests , '''test is slow''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Run only on CPU-only machines (skip when CUDA is available).'''
    return unittest.skipUnless(not torch.cuda.is_available() , '''test requires only a CPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Run only when a CUDA GPU is available.'''
    return unittest.skipUnless(torch.cuda.is_available() , '''test requires a GPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Run only when an XPU device is available.'''
    return unittest.skipUnless(is_xpu_available() , '''test requires a XPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Run only when torch has `mps` backend support.'''
    return unittest.skipUnless(is_mps_available() , '''test requires a `mps` backend support in `torch`''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Run only when both `transformers` and `datasets` are installed.'''
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available() , '''test requires the Hugging Face suite''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Run only when `bitsandbytes` is installed.'''
    return unittest.skipUnless(is_bnb_available() , '''test requires the bitsandbytes library''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Run only when a TPU is available.'''
    return unittest.skipUnless(is_tpu_available() , '''test requires TPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Run only on machines with exactly one CUDA GPU.'''
    return unittest.skipUnless(torch.cuda.device_count() == 1 , '''test requires a GPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Run only on machines with exactly one XPU.'''
    return unittest.skipUnless(torch.xpu.device_count() == 1 , '''test requires a XPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Run only when multiple CUDA GPUs are available.'''
    return unittest.skipUnless(torch.cuda.device_count() > 1 , '''test requires multiple GPUs''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Run only when multiple XPUs are available.'''
    return unittest.skipUnless(torch.xpu.device_count() > 1 , '''test requires multiple XPUs''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Run only when `safetensors` is installed.'''
    return unittest.skipUnless(is_safetensors_available() , '''test requires safetensors''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Run only when DeepSpeed is installed.'''
    return unittest.skipUnless(is_deepspeed_available() , '''test requires DeepSpeed''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Run only when torch >= 1.12.0 is installed.'''
    return unittest.skipUnless(is_torch_version('''>=''' , '''1.12.0''' ) , '''test requires torch version >= 1.12.0''' )(lowerCAmelCase__ )
def UpperCamelCase ( test_case=None , version=None ):
    '''Skip decorator requiring torch >= ``version``.

    Usable directly or as a decorator factory: when called with only
    ``version`` it returns a partial of itself awaiting the test case.
    The original duplicated the parameter name (a SyntaxError) and bound the
    partial to the wrong argument.
    '''
    if test_case is None:
        return partial(UpperCamelCase , version=version )
    return unittest.skipUnless(is_torch_version('''>=''' , version ) , f'test requires torch version >= {version}' )(test_case )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Run only when Tensorboard is installed.'''
    return unittest.skipUnless(is_tensorboard_available() , '''test requires Tensorboard''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Run only when wandb is installed.'''
    return unittest.skipUnless(is_wandb_available() , '''test requires wandb''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Run only when comet_ml is installed.'''
    return unittest.skipUnless(is_comet_ml_available() , '''test requires comet_ml''' )(lowerCAmelCase__ )
# True when at least one supported experiment tracker is importable and
# comet_ml is absent.  Bound under the upstream name too, because
# `require_trackers` below reads `_atleast_one_tracker_available`, which the
# original assignment never defined.
_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
lowercase__ :Dict = _atleast_one_tracker_available
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Run only when a tracker is available and comet_ml is NOT installed.

    NOTE(review): `_atleast_one_tracker_available` is not bound under that
    name in this file as written (the flag above is assigned to
    `lowercase__`) — verify before relying on this decorator.
    '''
    return unittest.skipUnless(
        _atleast_one_tracker_available , '''test requires at least one tracker to be available and for `comet_ml` to not be installed''' , )(lowerCAmelCase__ )
class lowercase ( unittest.TestCase ):
    '''TestCase managing a class-wide temp dir, optionally emptied per test.

    The original bound `tmpdir` to a local instead of the class and gave all
    three lifecycle methods the same name `A__`, so unittest never ran them;
    `setUpClass`/`tearDownClass`/`setUp` restore the lifecycle hooks the
    bodies clearly implement.
    '''
    # When True, `setUp` wipes the temp dir's contents before each test.
    clear_on_setup = True

    @classmethod
    def setUpClass( cls):
        # One temp dir shared by the whole class; removed in tearDownClass.
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass( cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp( self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob('''**/*'''):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)
class lowercase ( unittest.TestCase ):
    '''TestCase that resets accelerate's global state singletons after each test.

    The original named the hook `A__`, so unittest never called it;
    `tearDown` restores the lifecycle hook the body (with its
    `super().tearDown()` call) clearly implements.
    '''

    def tearDown( self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class lowercase ( unittest.TestCase ):
    '''TestCase that starts given mock patchers and stops them on cleanup.'''

    def A__ ( self ,A__):
        # Accept a single mock or a tuple/list of mocks.  The original read
        # an undefined `mocks` name and never set `self.mocks`.
        self.mocks = A__ if isinstance(A__ ,(tuple, list)) else [A__]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Return True when ``lowerCAmelCase__`` holds the same values on every process.

    Gathers the tensor across processes and compares each gathered copy to
    the local one.  The original referenced undefined ``tensor``/``tensors``
    locals.
    '''
    state = AcceleratorState()
    tensor = lowerCAmelCase__[None].clone().to(state.device )
    tensors = gather(tensor ).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0] ):
        if not torch.equal(tensors[i] , tensor ):
            return False
    return True
class lowercase :
    '''Value object for a finished subprocess: return code plus captured output.

    The original declared all three `__init__` parameters with the same name
    (a SyntaxError).
    '''

    def __init__( self ,returncode ,stdout ,stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def UpperCamelCase ( stream , callback ):
    '''Forward each line read from async ``stream`` to ``callback`` until EOF.

    The original declared both parameters with the same name (a SyntaxError)
    and read from undefined locals.
    '''
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def UpperCamelCase ( cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False ):
    '''Run ``cmd`` asynchronously, teeing stdout/stderr line-by-line.

    Returns a ``_RunOutput`` with the exit code and the captured lines.
    NOTE(review): `_read_stream` and `_RunOutput` are defined above under
    mangled names — confirm those references resolve before use.
    '''
    if echo:
        print('''\nRunning: ''' , ''' '''.join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line , sink , pipe , label="" ):
        # Decode, collect, and optionally echo one line of child output.
        line = line.decode('''utf-8''' ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout , lambda line: tee(line , out , sys.stdout , label='''stdout:''' ) ) ),
            asyncio.create_task(_read_stream(p.stderr , lambda line: tee(line , err , sys.stderr , label='''stderr:''' ) ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def UpperCamelCase ( cmd , env=None , stdin=None , timeout=180 , quiet=False , echo=True ):
    '''Run ``cmd`` via the async streamer and raise RuntimeError on non-zero exit.

    NOTE(review): `_stream_subprocess` is defined above under a mangled name —
    confirm the reference resolves before use.
    '''
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )
    cmd_str = ''' '''.join(cmd )
    if result.returncode > 0:
        stderr = '''\n'''.join(result.stderr )
        raise RuntimeError(
            f'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
            f'The combined stderr from workers follows:\n{stderr}' )
    return result
class lowercase ( Exception ):
    '''Raised when a checked subprocess exits non-zero; wraps the captured output.

    NOTE(review): the original base `SCREAMING_SNAKE_CASE__` is undefined;
    `Exception` matches how `run_command` below raises this via ``from e``.
    '''
    pass
def UpperCamelCase ( command , return_stdout=False ):
    '''Run ``command`` (a list), optionally returning its decoded stdout.

    Raises ``SubprocessCallException`` (with the child's combined output) on
    failure.  The original duplicated the parameter name (a SyntaxError) and
    read an undefined ``output`` local.
    NOTE(review): the exception class is defined above under a mangled name —
    confirm the reference resolves.
    '''
    try:
        output = subprocess.check_output(command , stderr=subprocess.STDOUT )
        if return_stdout:
            if hasattr(output , '''decode''' ):
                output = output.decode('''utf-8''' )
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f'Command `{" ".join(command )}` failed with the following error:\n\n{e.output.decode()}' ) from e
| 633 | 0 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    '''Builds tiny LiLT configs/inputs and checks each model head.

    NOTE(review): the original named this class `lowercase`, declared every
    constructor parameter `A__` (a SyntaxError) and bound all attributes to a
    local; names are restored from the attribute reads in the method bodies.
    The test class below already instantiates `LiltModelTester`, so the class
    is renamed to match.
    '''

    def __init__( self ,parent ,batch_size=1_3 ,seq_length=7 ,is_training=True ,use_input_mask=True ,use_token_type_ids=True ,use_labels=True ,vocab_size=9_9 ,hidden_size=2_4 ,num_hidden_layers=2 ,num_attention_heads=6 ,intermediate_size=3_7 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=5_1_2 ,type_vocab_size=1_6 ,type_sequence_label_size=2 ,initializer_range=0.02 ,num_labels=3 ,scope=None ,range_bbox=1_0_0_0 ,):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs( self):
        input_ids = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.seq_length, 4] ,self.range_bbox)
        # Ensure that bbox is legal: x0 <= x1 and y0 <= y1 for every box.
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] ,self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels)
        config = self.get_config()
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config( self):
        return LiltConfig(
            vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,)

    def create_and_check_model( self ,config ,input_ids ,bbox ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids ,bbox=bbox ,attention_mask=input_mask ,token_type_ids=token_type_ids)
        result = model(input_ids ,bbox=bbox ,token_type_ids=token_type_ids)
        result = model(input_ids ,bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification( self ,config ,input_ids ,bbox ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids ,bbox=bbox ,attention_mask=input_mask ,token_type_ids=token_type_ids ,labels=token_labels)
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering( self ,config ,input_ids ,bbox ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids ,bbox=bbox ,attention_mask=input_mask ,token_type_ids=token_type_ids ,start_positions=sequence_labels ,end_positions=sequence_labels ,)
        self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common( self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''bbox''': bbox,
            '''token_type_ids''': token_type_ids,
            '''attention_mask''': input_mask,
        }
        return config, inputs_dict
@require_torch
class lowercase ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''Common-suite tests for LiLT.

    NOTE(review): the original listed a duplicated mangled name three times as
    base classes and bound all four class attributes to the same name;
    the restored bases/attribute names follow this file's imports and the
    mixin API — confirm against upstream.
    '''
    all_model_classes =(
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping =(
        {
            '''feature-extraction''': LiltModel,
            '''question-answering''': LiltForQuestionAnswering,
            '''text-classification''': LiltForSequenceClassification,
            '''token-classification''': LiltForTokenClassification,
            '''zero-shot''': LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible =False
    test_pruning =False

    def is_pipeline_test_to_skip( self ,pipeline_test_casse_name ,config_class ,model_architecture ,tokenizer_name ,processor_name):
        # LiLT pipelines need bbox inputs the generic pipeline tests don't supply.
        return True

    def setUp( self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self ,config_class=LiltConfig ,hidden_size=3_7)

    def test_config( self):
        self.config_tester.run_common_tests()

    def test_model( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained( self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class lowercase ( unittest.TestCase ):
    def A__ ( self):
        """Integration test: run the published LiLT checkpoint on a tiny input and
        compare the output shape and first hidden states against reference values.

        NOTE(review): ``__lowercase`` is a garbled reference, presumably
        ``torch_device`` — confirm against the original test file.
        """
        model = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''').to(__lowercase)
        input_ids = torch.tensor([[1, 2]] ,device=__lowercase)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] ,device=__lowercase)
        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids ,bbox=bbox)
        expected_shape = torch.Size([1, 2, 7_6_8])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] ,device=__lowercase ,)
        # Bug fix: the original used assertTrue(shape, expected), which treats the
        # expected value as the assertion *message* and always passes for a
        # non-empty shape; assertEqual performs the intended comparison.
        self.assertEqual(outputs.last_hidden_state.shape ,expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] ,expected_slice ,atol=1E-3))
| 715 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowercase ( SCREAMING_SNAKE_CASE__ ):
    """Sequence-to-sequence trainer with generation-aware evaluate()/predict().

    Wraps the base Seq2SeqTrainer so that metric computation happens *after* an
    optional post-processing step that turns generated sequences into final
    answers (question-answering style).
    """
    def __init__( self ,*A__ ,A__=None ,A__=None ,**A__):
        super().__init__(*A__ ,**A__)
        # NOTE(review): garbled assignments — the two keyword arguments are
        # presumably stored as self.eval_examples / self.post_process_function
        # (both are read via ``self.`` below).
        lowercase = eval_examples
        lowercase = post_process_function
    def A__ ( self ,A__ = None ,A__=None ,A__ = None ,A__ = "eval" ,**A__ ,):
        # evaluate(): run the evaluation loop with generation kwargs, then apply
        # post-processing and compute metrics outside the loop.
        lowercase = gen_kwargs.copy()
        # Fall back to the generation limits configured on the TrainingArguments.
        lowercase = (
            gen_kwargs['''max_length'''] if gen_kwargs.get('''max_length''') is not None else self.args.generation_max_length
        )
        lowercase = (
            gen_kwargs['''num_beams'''] if gen_kwargs.get('''num_beams''') is not None else self.args.generation_num_beams
        )
        lowercase = gen_kwargs
        lowercase = self.eval_dataset if eval_dataset is None else eval_dataset
        lowercase = self.get_eval_dataloader(A__)
        lowercase = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        lowercase = self.compute_metrics
        lowercase = None
        lowercase = time.time()
        lowercase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            lowercase = eval_loop(
                A__ ,description='''Evaluation''' ,prediction_loss_only=True if compute_metrics is None else None ,ignore_keys=A__ ,metric_key_prefix=A__ ,)
        finally:
            # Always restore the metric function, even if the loop raised.
            lowercase = compute_metrics
        lowercase = self.args.eval_batch_size * self.args.world_size
        if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
            # Exclude one-off JIT compilation time from the speed metrics.
            start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
        output.metrics.update(
            speed_metrics(
                A__ ,A__ ,num_samples=output.num_samples ,num_steps=math.ceil(output.num_samples / total_batch_size) ,))
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            lowercase = self.post_process_function(A__ ,A__ ,A__)
            lowercase = self.compute_metrics(A__)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f'{metric_key_prefix}_'):
                    lowercase = metrics.pop(A__)
            metrics.update(output.metrics)
        else:
            lowercase = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(A__)
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        lowercase = self.callback_handler.on_evaluate(self.args ,self.state ,self.control ,A__)
        return metrics
    def A__ ( self ,A__ ,A__ ,A__=None ,A__ = "test" ,**A__):
        # predict(): same flow as evaluate() but returns a PredictionOutput and
        # post-processes with the "predict" stage flag.
        lowercase = gen_kwargs.copy()
        lowercase = self.get_test_dataloader(A__)
        # Temporarily disable metric computation, we will do it in the loop here.
        lowercase = self.compute_metrics
        lowercase = None
        lowercase = time.time()
        lowercase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            lowercase = eval_loop(
                A__ ,description='''Prediction''' ,prediction_loss_only=True if compute_metrics is None else None ,ignore_keys=A__ ,metric_key_prefix=A__ ,)
        finally:
            lowercase = compute_metrics
        lowercase = self.args.eval_batch_size * self.args.world_size
        if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
            start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
        output.metrics.update(
            speed_metrics(
                A__ ,A__ ,num_samples=output.num_samples ,num_steps=math.ceil(output.num_samples / total_batch_size) ,))
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        lowercase = self.post_process_function(A__ ,A__ ,A__ ,'''predict''')
        lowercase = self.compute_metrics(A__)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f'{metric_key_prefix}_'):
                lowercase = metrics.pop(A__)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions ,label_ids=predictions.label_ids ,metrics=A__)
| 633 | 0 |
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase ( __snake_case , unittest.TestCase ):
    """Tokenizer test-suite for LED (slow + fast tokenizers).

    NOTE(review): identifiers in this file are garbled — ``A_`` placeholders
    stand in for real locals/arguments, and every method is named ``A__``
    (later definitions override earlier ones at runtime).
    """
    lowercase_ : str =LEDTokenizer
    lowercase_ : Union[str, Any] =LEDTokenizerFast
    lowercase_ : Tuple =True
    def A__ ( self):
        # setUp: write a tiny BPE vocab/merges pair into tmpdirname.
        super().setUp()
        lowercase = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        lowercase = dict(zip(A_ ,range(len(A_))))
        lowercase = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        lowercase = {"unk_token": "<unk>"}
        lowercase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''])
        lowercase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''merges_file'''])
        with open(self.vocab_file ,'''w''' ,encoding='''utf-8''') as fp:
            fp.write(json.dumps(A_) + '''\n''')
        with open(self.merges_file ,'''w''' ,encoding='''utf-8''') as fp:
            fp.write('''\n'''.join(A_))
    def A__ ( self ,**A__):
        # Factory for the slow tokenizer under test.
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname ,**A_)
    def A__ ( self ,**A__):
        # Factory for the fast (Rust) tokenizer under test.
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname ,**A_)
    def A__ ( self ,A__):
        # Input/output pair used by the common tokenizer tests.
        return "lower newer", "lower newer"
    @cached_property
    def A__ ( self):
        # Published slow tokenizer used by the integration checks below.
        return LEDTokenizer.from_pretrained('''allenai/led-base-16384''')
    @cached_property
    def A__ ( self):
        # Published fast tokenizer used by the integration checks below.
        return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''')
    @require_torch
    def A__ ( self):
        # Batched encoding returns the expected token ids and (2, 9) tensors.
        lowercase = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        lowercase = [0, 2_5_0, 2_5_1, 1_7_8_1_8, 1_3, 3_9_1_8_6, 1_9_3_8, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowercase = tokenizer(A_ ,max_length=len(A_) ,padding=A_ ,return_tensors='''pt''')
            self.assertIsInstance(A_ ,A_)
            self.assertEqual((2, 9) ,batch.input_ids.shape)
            self.assertEqual((2, 9) ,batch.attention_mask.shape)
            lowercase = batch.input_ids.tolist()[0]
            self.assertListEqual(A_ ,A_)
    @require_torch
    def A__ ( self):
        # Plain __call__ must not produce seq2seq-only keys.
        lowercase = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowercase = tokenizer(A_ ,padding=A_ ,return_tensors='''pt''')
            self.assertIn('''input_ids''' ,A_)
            self.assertIn('''attention_mask''' ,A_)
            self.assertNotIn('''labels''' ,A_)
            self.assertNotIn('''decoder_attention_mask''' ,A_)
    @require_torch
    def A__ ( self):
        # max_length padding of targets yields width-32 tensors.
        lowercase = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowercase = tokenizer(text_target=A_ ,max_length=3_2 ,padding='''max_length''' ,return_tensors='''pt''')
            self.assertEqual(3_2 ,targets['''input_ids'''].shape[1])
    @require_torch
    def A__ ( self):
        # Long inputs are truncated to the LED maximum length (5122).
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowercase = tokenizer(
                ['''I am a small frog''' * 1_0_2_4, '''I am a small frog'''] ,padding=A_ ,truncation=A_ ,return_tensors='''pt''')
            self.assertIsInstance(A_ ,A_)
            self.assertEqual(batch.input_ids.shape ,(2, 5_1_2_2))
    @require_torch
    def A__ ( self):
        # Both sources and targets must be wrapped in <s> ... </s>.
        lowercase = ["A long paragraph for summarization."]
        lowercase = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowercase = tokenizer(A_ ,return_tensors='''pt''')
            lowercase = tokenizer(text_target=A_ ,return_tensors='''pt''')
            lowercase = inputs["input_ids"]
            lowercase = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
    @require_torch
    def A__ ( self):
        # tokenizer.pad() must also pad the LED-specific global_attention_mask.
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowercase = ["Summary of the text.", "Another summary."]
            lowercase = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            lowercase = tokenizer(A_ ,padding=A_)
            # NOTE(review): ``len(A_)`` is garbled — upstream this is ``len(x)``
            # (one zero-mask per input sequence); confirm against the original.
            lowercase = [[0] * len(A_) for x in encoded_output["input_ids"]]
            lowercase = tokenizer.pad(A_)
            self.assertSequenceEqual(outputs['''global_attention_mask'''] ,A_)
    def A__ ( self):
        # Intentionally skipped common test.
        pass
    def A__ ( self):
        # Slow and fast tokenizers must agree on a sentence containing <mask>.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                lowercase = self.rust_tokenizer_class.from_pretrained(A_ ,**A_)
                lowercase = self.tokenizer_class.from_pretrained(A_ ,**A_)
                lowercase = "A, <mask> AllenNLP sentence."
                lowercase = tokenizer_r.encode_plus(A_ ,add_special_tokens=A_ ,return_token_type_ids=A_)
                lowercase = tokenizer_p.encode_plus(A_ ,add_special_tokens=A_ ,return_token_type_ids=A_)
                self.assertEqual(sum(tokens_r['''token_type_ids''']) ,sum(tokens_p['''token_type_ids''']))
                self.assertEqual(
                    sum(tokens_r['''attention_mask''']) / len(tokens_r['''attention_mask''']) ,sum(tokens_p['''attention_mask''']) / len(tokens_p['''attention_mask''']) ,)
                lowercase = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''])
                lowercase = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''])
                self.assertSequenceEqual(tokens_p['''input_ids'''] ,[0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2])
                self.assertSequenceEqual(tokens_r['''input_ids'''] ,[0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2])
                self.assertSequenceEqual(
                    A_ ,['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''])
                self.assertSequenceEqual(
                    A_ ,['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''])
| 716 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
lowercase__ :Any = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def tpu_command_parser(subparsers=None):
    """Build the argument parser for ``accelerate tpu-config``.

    If *subparsers* is given, attach the parser as the ``tpu-config`` subcommand
    and wire the launcher as its default ``func``; otherwise build a standalone
    parser. Returns the parser.

    NOTE(review): renamed from the garbled ``UpperCamelCase`` — main() below calls
    ``tpu_command_parser()``; parameter/local names are restored from body usage
    (``type``/``default`` placeholders restored to ``str``/``None`` per upstream).
    """
    if subparsers is not None:
        parser = subparsers.add_parser('''tpu-config''' , description=_description )
    else:
        parser = argparse.ArgumentParser('''Accelerate tpu-config command''' , description=_description )
    # Core arguments
    config_args = parser.add_argument_group(
        '''Config Arguments''' , '''Arguments that can be configured through `accelerate config`.''' )
    config_args.add_argument(
        '''--config_file''' , type=str , default=None , help='''Path to the config file to use for accelerate.''' , )
    config_args.add_argument(
        '''--tpu_name''' , default=None , help='''The name of the TPU to use. If not specified, will use the TPU specified in the config file.''' , )
    config_args.add_argument(
        '''--tpu_zone''' , default=None , help='''The zone of the TPU to use. If not specified, will use the zone specified in the config file.''' , )
    pod_args = parser.add_argument_group('''TPU Arguments''' , '''Arguments for options ran inside the TPU.''' )
    pod_args.add_argument(
        '''--use_alpha''' , action='''store_true''' , help='''Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.''' , )
    pod_args.add_argument(
        '''--command_file''' , default=None , help='''The path to the file containing the commands to run on the pod on startup.''' , )
    pod_args.add_argument(
        '''--command''' , action='''append''' , nargs='''+''' , help='''A command to run on the pod. Can be passed multiple times.''' , )
    pod_args.add_argument(
        '''--install_accelerate''' , action='''store_true''' , help='''Whether to install accelerate on the pod. Defaults to False.''' , )
    pod_args.add_argument(
        '''--accelerate_version''' , default='''latest''' , help='''The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.''' , )
    pod_args.add_argument(
        '''--debug''' , action='''store_true''' , help='''If set, will print the command that would be run instead of running it.''' )
    if subparsers is not None:
        # NOTE(review): the launcher is also garbled-named in this module;
        # upstream the default func is tpu_command_launcher.
        parser.set_defaults(func=tpu_command_launcher )
    return parser
def tpu_command_launcher(args):
    """Execute ``accelerate tpu-config``: merge CLI args with the saved accelerate
    config, assemble the startup command, and run it on the pod via gcloud.

    NOTE(review): renamed from the garbled ``UpperCamelCase`` — main() below calls
    ``tpu_command_launcher(...)``; locals and the ``default_config_file`` /
    ``Version`` references are restored from body usage and the imports above.
    """
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    # Resolve the accelerate package spec to install on the pod.
    if args.accelerate_version == "dev":
        args.accelerate_version = '''git+https://github.com/huggingface/accelerate.git'''
    elif args.accelerate_version == "latest":
        args.accelerate_version = '''accelerate -U'''
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f'accelerate=={args.accelerate_version}'
    if not args.command_file and not args.command:
        raise ValueError('''You must specify either a command file or a command to run on the pod.''')
    if args.command_file:
        with open(args.command_file, '''r''') as f:
            args.command = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ['''cd /usr/share''']
    if args.install_accelerate:
        new_cmd += [f'pip install {args.accelerate_version}']
    new_cmd += args.command
    args.command = '''; '''.join(new_cmd)
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ['''gcloud''']
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f'Running {" ".join(cmd)}')
        return
    subprocess.run(cmd)
    print('''Successfully setup pod.''')
def UpperCamelCase():
    """CLI entry point: build the tpu-config parser, parse argv, and launch.

    NOTE(review): locals restored from the real names used in the body; the
    public (garbled) name is kept. Upstream this function is ``main``.
    """
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
| 633 | 0 |
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
# Emit a deprecation warning at import time: this module only re-exports the
# ControlNet pipeline classes from their new location (diffusers.pipelines.controlnet).
deprecate(
    "stable diffusion controlnet",
    "0.22.0",
    "Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.",
    standard_warn=False,
    stacklevel=3,
)
| 717 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
# Token-splitting pattern used by get_tokens(); splits on any non-word character.
# NOTE(review): the three constants below were all garbled to ``lowercase__``
# (each rebinding the previous one); the names are restored from their uses
# throughout this module (NON_ALPHA, MIN_NUM_TOKENS, NUM_PERM).
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10  # documents with fewer tokens are skipped by get_min_hash()
NUM_PERM = 256  # number of MinHash permutations
def get_min_hash(tokens):
    """Compute the MinHash signature of a token list.

    Returns ``None`` for documents shorter than MIN_NUM_TOKENS (too small to
    deduplicate reliably).

    NOTE(review): renamed from the garbled ``UpperCamelCase`` — callers in this
    module use ``get_min_hash``; the local and the ``num_perm=NUM_PERM`` argument
    are restored (the original passed the token list itself as ``num_perm``).
    """
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    # Hash each distinct token once; MinHash is order-insensitive.
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash
def get_tokens(code):
    """Return the set of non-empty tokens obtained by splitting *code* on the
    module-level NON_ALPHA pattern (non-alphanumeric characters).

    NOTE(review): renamed from the garbled ``UpperCamelCase`` — callers in this
    module use ``get_tokens``.
    """
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    """Index of near-duplicate documents based on MinHash + LSH.

    NOTE(review): the original class was garbled to ``lowercase`` but is
    instantiated as ``DuplicationIndex`` (with ``add`` / ``get_duplicate_clusters``
    method calls) elsewhere in this module, so the real names are restored here.
    """

    def __init__(self, *, duplication_jaccard_threshold=0.85):
        # Jaccard similarity above which two documents count as duplicates.
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        # LSH index used to retrieve candidate duplicates quickly.
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        # Maps a cluster's "base" key to the set of its duplicate keys.
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key, min_hash):
        """Insert *code_key* with its *min_hash*, attaching it to an existing
        duplicate cluster when the LSH index finds close matches."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f'Duplicate key {code_key}')
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            # Attach to the first match that already heads a cluster; otherwise
            # make the first close match the new cluster base (for-else).
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self):
        """Return clusters as lists of {base_index, repo_name, path} dicts."""
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{'''base_index''': el[0], '''repo_name''': el[1], '''path''': el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath):
        """Dump the duplicate clusters as JSON to *filepath*."""
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, '''w''') as f:
            json.dump(duplicate_clusters, f)
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase , lowercase = element
lowercase = get_min_hash([t for t in NON_ALPHA.split(data['''content'''] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator):
    """Yield ``(key, MinHash)`` pairs for every document in *dataset_iterator*,
    computing the hashes in a multiprocessing pool.

    NOTE(review): renamed from the garbled ``UpperCamelCase`` —
    make_duplicate_clusters() calls ``minhash_iter``.
    """
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash, ThreadedIterator(dataset_iterator, max_queue_size=1_0000), chunksize=100, ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator, jaccard_threshold):
    """Build duplicate clusters over the whole dataset.

    NOTE(review): renamed from the garbled ``UpperCamelCase`` — the dedup entry
    point below calls ``make_duplicate_clusters``.
    """
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code1, code2):
    """Jaccard similarity between the token sets of two code strings.

    Bug fix: the original bound both token sets to the same variable, so the
    ratio was always 1.0 regardless of the inputs; the two strings are now
    tokenized into separate sets. (Renamed from the garbled ``UpperCamelCase`` —
    _find_cluster_extremes_shared() calls ``jaccard_similarity``.)
    """
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
lowercase__ :List[Any] = None
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
lowercase = []
for elementa in cluster:
lowercase = _shared_dataset[elementa['''base_index''']]['''content''']
for elementa in extremes:
lowercase = _shared_dataset[elementa['''base_index''']]['''content''']
if jaccard_similarity(lowerCAmelCase__ , lowerCAmelCase__ ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
lowercase = 1
extremes.append(lowerCAmelCase__ )
return extremes
def find_extremes(cluster_list, dataset, jaccard_threshold):
    """Run _find_cluster_extremes_shared over every cluster with a process pool.

    Stores *dataset* in the module-global ``_shared_dataset`` so child processes
    inherit it instead of pickling it per task.

    NOTE(review): renamed from the garbled ``UpperCamelCase`` (duplicate
    parameter names were a SyntaxError) — the dedup entry point below calls
    ``find_extremes``; locals restored from body usage.
    """
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f, cluster_list, ), total=len(cluster_list), ):
            extremes_list.append(extremes)
    return extremes_list
def UpperCamelCase(dataset, jaccard_threshold=0.85):
    """Deduplicate *dataset*: cluster near-duplicates, keep one representative
    ("extreme") per neighbourhood, and filter the rest out.

    Returns ``(filtered_dataset, duplicate_clusters)``; cluster members are
    annotated in place with ``is_extreme`` and (for kept ones) ``copies``.

    NOTE(review): kept the garbled public name (no in-file caller proves the
    real one); upstream this entry point is ``deduplicate_dataset``. Locals and
    the ``dataset.filter(..., with_indices=True)`` call are restored from usage.
    """
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x['''base_index'''] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element['''base_index''']] = element
    # Drop every duplicate that is not a kept representative.
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda example, idx: idx not in remove_indices, with_indices=True)
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element['''base_index'''] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element['''base_index''']]['''copies''']
    print(f'Original dataset size: {len(dataset)}')
    print(f'Number of duplicate clusters: {len(duplicate_clusters)}')
    print(f'Files in duplicate cluster: {len(duplicate_indices)}')
    print(f'Unique files in duplicate cluster: {len(extreme_dict)}')
    print(f'Filtered dataset size: {len(ds_filter)}')
    return ds_filter, duplicate_clusters
| 633 | 0 |
def sum_of_series(first_term, common_diff, num_of_terms):
    """Return the sum of an arithmetic series.

    Uses the closed form ``n/2 * (2a + (n - 1)d)``; the result is a float
    because of the division.

    NOTE(review): renamed from the garbled ``UpperCamelCase`` (its duplicate
    placeholder parameters were a SyntaxError) — the demo below calls
    ``sum_of_series``.
    """
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total
def UpperCamelCase ( ):
    # Demo entry point: prints the sum of the arithmetic series 1 + 2 + ... + 10.
    '''simple docstring'''
    print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
    # Runs doctests only; the demo above is not invoked automatically.
    import doctest
    doctest.testmod()
| 718 |
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
lowercase__ :Union[str, Any] = logging.get_logger(__name__)
class lowercase ( SCREAMING_SNAKE_CASE__ ):
    """Safety checker for diffusion pipelines: flags NSFW and watermarked images
    with two linear probes on CLIP image embeddings and blacks out flagged images.

    NOTE(review): identifiers are garbled — each ``lowercase`` assignment feeds
    the meaningful name read on the following lines (e.g. nsfw_detected,
    watermark_detected), and the ``A__`` parameters of the forward method are
    presumably (clip_input, images, p_threshold, w_threshold) — confirm against
    the original file.
    """
    lowercase_ : Union[str, Any] =CLIPConfig
    lowercase_ : str =['''CLIPEncoderLayer''']
    def __init__( self ,A__):
        super().__init__(A__)
        # CLIP vision tower plus two scalar heads: one for NSFW, one for watermarks.
        lowercase = CLIPVisionModelWithProjection(config.vision_config)
        lowercase = nn.Linear(config.vision_config.projection_dim ,1)
        lowercase = nn.Linear(config.vision_config.projection_dim ,1)
    @torch.no_grad()
    def A__ ( self ,A__ ,A__ ,A__=0.5 ,A__=0.5):
        # Embed the inputs, score with each head, threshold, and replace any
        # flagged image with a same-shaped black (all-zero) image.
        lowercase = self.vision_model(A__)[0]
        lowercase = self.p_head(A__)
        lowercase = nsfw_detected.flatten()
        lowercase = nsfw_detected > p_threshold
        lowercase = nsfw_detected.tolist()
        if any(A__):
            logger.warning(
                '''Potential NSFW content was detected in one or more images. A black image will be returned instead.'''
                ''' Try again with a different prompt and/or seed.''')
            for idx, nsfw_detected_ in enumerate(A__):
                if nsfw_detected_:
                    lowercase = np.zeros(images[idx].shape)
        lowercase = self.w_head(A__)
        lowercase = watermark_detected.flatten()
        lowercase = watermark_detected > w_threshold
        lowercase = watermark_detected.tolist()
        if any(A__):
            logger.warning(
                '''Potential watermarked content was detected in one or more images. A black image will be returned instead.'''
                ''' Try again with a different prompt and/or seed.''')
            for idx, watermark_detected_ in enumerate(A__):
                if watermark_detected_:
                    lowercase = np.zeros(images[idx].shape)
        return images, nsfw_detected, watermark_detected
| 633 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
lowercase__ :List[str] = {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}
class lowercase ( __a ):
    """ALBERT model configuration (vocabulary/embedding sizes, layer widths,
    dropout rates, position-embedding type, special-token ids).

    NOTE(review): the original ``__init__`` declared every parameter as the same
    placeholder name ``A__`` (a SyntaxError) while the body read the real names;
    parameter names are restored from the body's attribute assignments and the
    defaults match the standard ALBERT configuration.
    """
    lowercase_ : int ="albert"
    def __init__(
        self,
        vocab_size=3_0_0_0_0,
        embedding_size=1_2_8,
        hidden_size=4_0_9_6,
        num_hidden_layers=1_2,
        num_hidden_groups=1,
        num_attention_heads=6_4,
        intermediate_size=1_6_3_8_4,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=5_1_2,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        # Special-token ids are forwarded to the PretrainedConfig base class.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class lowercase ( __a ):
    # ONNX export configuration for ALBERT.
    @property
    def A__ ( self):
        # Declare the dynamic axes of the ONNX graph inputs.
        # NOTE(review): garbled names — this is presumably the ``inputs`` property
        # of OnnxConfig, and ``dynamic_axis`` is the dict bound by the garbled
        # ``lowercase`` assignments above it.
        if self.task == "multiple-choice":
            lowercase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            lowercase = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
                ('''token_type_ids''', dynamic_axis),
            ])
| 719 |
class Node:
    """Binary-search-tree node used by tree_sort.

    NOTE(review): renamed from the garbled ``lowercase`` — the module
    instantiates it as ``Node(...)`` (including inside insert itself) and calls
    the insert method by name.
    """

    def __init__(self, val):
        self.val = val     # value stored at this node
        self.left = None   # subtree with smaller values
        self.right = None  # subtree with larger values

    def insert(self, val):
        """Insert *val* into the subtree rooted at this node.

        NOTE(review): the ``if self.val`` guard treats a falsy stored value (0)
        like an empty node, so inserting into a node holding 0 overwrites it —
        kept as-is to preserve the original behavior. Equal values also
        overwrite in place (duplicates are dropped).
        """
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val
def inorder(root, res):
    """Append the in-order traversal of *root* to the list *res* (in place).

    NOTE(review): renamed from the garbled ``UpperCamelCase`` (duplicate
    parameter names were a SyntaxError) — the body recurses as ``inorder`` and
    tree_sort() calls it by that name.
    """
    # Recursive traversal
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)
def tree_sort(arr):
    """Sort *arr* by inserting every element into a BST and reading it back in
    order. Note: duplicate values are dropped by Node.insert (they overwrite in
    place).

    NOTE(review): renamed from the garbled ``UpperCamelCase`` — the __main__
    guard below calls ``tree_sort``.
    """
    # Build BST
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 633 | 0 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class lowercase ( unittest.TestCase ):
    """Launches accelerate's external test_metrics script on CPU, single GPU and
    multi-GPU to verify metric computation across launch modes.

    NOTE(review): method names are garbled to ``A__`` (later definitions override
    earlier ones) and ``__a`` in the last method presumably refers to the
    torchrun command list built just above it.
    """
    def A__ ( self):
        # setUp: locate the bundled test_metrics.py next to accelerate's test
        # utilities and keep a handle to its module.
        lowercase = inspect.getfile(accelerate.test_utils)
        lowercase = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''])
        from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
        lowercase = test_metrics
    @require_cpu
    def A__ ( self):
        # Single-process CPU run via debug_launcher.
        debug_launcher(self.test_metrics.main ,num_processes=1)
    @require_cpu
    def A__ ( self):
        # Default (multi-process) CPU run.
        debug_launcher(self.test_metrics.main)
    @require_single_gpu
    def A__ ( self):
        # Direct in-process run on one GPU.
        self.test_metrics.main()
    @require_multi_gpu
    def A__ ( self):
        # Distributed run via torchrun across all visible GPUs; OMP threads
        # pinned to 1 to avoid CPU oversubscription.
        print(f'Found {torch.cuda.device_count()} devices.')
        lowercase = ["""torchrun""", f'--nproc_per_node={torch.cuda.device_count()}', self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(__a ,env=os.environ.copy())
| 720 |
import os
def UpperCamelCase ( lowerCAmelCase__ = "input.txt" ):
'''simple docstring'''
with open(os.path.join(os.path.dirname(lowerCAmelCase__ ) , lowerCAmelCase__ ) ) as input_file:
lowercase = [
[int(lowerCAmelCase__ ) for element in line.split(''',''' )]
for line in input_file.readlines()
]
lowercase = len(lowerCAmelCase__ )
lowercase = len(matrix[0] )
lowercase = [[-1 for _ in range(lowerCAmelCase__ )] for _ in range(lowerCAmelCase__ )]
for i in range(lowerCAmelCase__ ):
lowercase = matrix[i][0]
for j in range(1 , lowerCAmelCase__ ):
for i in range(lowerCAmelCase__ ):
lowercase = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , lowerCAmelCase__ ):
lowercase = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
lowercase = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(F'{solution() = }')
| 633 | 0 |
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
lowercase__ :Tuple = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE__ )
class lowercase ( SCREAMING_SNAKE_CASE__ ):
    """Video classification pipeline: samples frames from a video (local path or
    http(s) URL) with decord and classifies them with a video model.

    NOTE(review): identifiers are garbled — ``UpperCAmelCase__`` placeholders
    stand in for the real arguments/locals, and the non-dunder methods named
    ``A__`` are presumably _sanitize_parameters / preprocess / _forward /
    postprocess of the Pipeline protocol.
    """
    def __init__( self ,*A__ ,**A__):
        super().__init__(*UpperCAmelCase__ ,**UpperCAmelCase__)
        requires_backends(self ,'''decord''')
        self.check_model_type(UpperCAmelCase__)
    def A__ ( self ,A__=None ,A__=None ,A__=None):
        # Split caller kwargs into preprocess and postprocess parameter dicts.
        lowercase = {}
        if frame_sampling_rate is not None:
            lowercase = frame_sampling_rate
        if num_frames is not None:
            lowercase = num_frames
        lowercase = {}
        if top_k is not None:
            lowercase = top_k
        return preprocess_params, {}, postprocess_params
    def __call__( self ,A__ ,**A__):
        return super().__call__(UpperCAmelCase__ ,**UpperCAmelCase__)
    def A__ ( self ,A__ ,A__=None ,A__=1):
        # preprocess: fetch the video if remote, sample num_frames evenly spaced
        # frames (stride frame_sampling_rate), then run the image processor.
        if num_frames is None:
            lowercase = self.model.config.num_frames
        if video.startswith('''http://''') or video.startswith('''https://'''):
            lowercase = BytesIO(requests.get(UpperCAmelCase__).content)
        lowercase = VideoReader(UpperCAmelCase__)
        videoreader.seek(0)
        lowercase = 0
        lowercase = num_frames * frame_sampling_rate - 1
        # NOTE(review): ``np.intaa`` is garbled, presumably ``np.int64``.
        lowercase = np.linspace(UpperCAmelCase__ ,UpperCAmelCase__ ,num=UpperCAmelCase__ ,dtype=np.intaa)
        lowercase = videoreader.get_batch(UpperCAmelCase__).asnumpy()
        lowercase = list(UpperCAmelCase__)
        lowercase = self.image_processor(UpperCAmelCase__ ,return_tensors=self.framework)
        return model_inputs
    def A__ ( self ,A__):
        # _forward: plain model call on the processed inputs.
        lowercase = self.model(**UpperCAmelCase__)
        return model_outputs
    def A__ ( self ,A__ ,A__=5):
        # postprocess: softmax the logits and return the top_k score/label dicts.
        if top_k > self.model.config.num_labels:
            lowercase = self.model.config.num_labels
        if self.framework == "pt":
            lowercase = model_outputs.logits.softmax(-1)[0]
            lowercase , lowercase = probs.topk(UpperCAmelCase__)
        else:
            raise ValueError(f'Unsupported framework: {self.framework}')
        lowercase = scores.tolist()
        lowercase = ids.tolist()
        return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(UpperCAmelCase__ ,UpperCAmelCase__)]
| 721 |
from __future__ import annotations
def UpperCamelCase ( nums ):
    """Check whether the given side lengths can form a polygon.

    A polygon is possible exactly when the longest side is strictly shorter
    than the sum of all the other sides.  Fixes the corrupted original, whose
    body read ``nums`` while the parameter was named ``lowerCAmelCase__``
    (guaranteed NameError).

    Raises:
        ValueError: for fewer than two values or any non-positive length.
    """
    if len(nums ) < 2:
        raise ValueError('''Monogons and Digons are not polygons in the Euclidean space''' )
    if any(i <= 0 for i in nums ):
        raise ValueError('''All values must be greater than 0''' )
    # Sort a copy so the caller's list is left untouched.
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 633 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both assignments below share the obfuscated name ``lowercase__``
# (the second overwrites the first); originally these were ``logger`` and the
# pretrained-checkpoint -> config-URL archive map.
lowercase__ :Optional[Any] = logging.get_logger(__name__)
lowercase__ :Optional[int] = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class lowercase ( __a ):
    """Configuration class for GPT-NeoX style models.

    Stores the hyper-parameters used to instantiate the model.  Fixes the
    corrupted original, which bound every value to a throwaway local
    ``lowercase`` (so ``self.hidden_size`` / ``self.rope_scaling`` — read at
    the bottom of ``__init__`` — were never set) and named the validation
    method ``A__`` while calling ``self._rope_scaling_validation()``.
    """
    # Model-type tag (kept under its obfuscated attribute name).
    lowercase_ : Dict ='''gpt_neox'''

    def __init__(
        self,
        vocab_size=5_0_4_3_2,
        hidden_size=6_1_4_4,
        num_hidden_layers=4_4,
        num_attention_heads=6_4,
        intermediate_size=2_4_5_7_6,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=1_0_0_0_0,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2_0_4_8,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                '''The hidden size is not divisble by the number of attention heads! Make sure to update them!''')

    def _rope_scaling_validation(self):
        """Validate ``rope_scaling``: a 2-key dict with 'type' in {'linear','dynamic'} and float 'factor' > 1."""
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                '''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, '''
                f'got {self.rope_scaling}')
        rope_scaling_type = self.rope_scaling.get('''type''', None)
        rope_scaling_factor = self.rope_scaling.get('''factor''', None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}')
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f'`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}')
| 700 |
def UpperCamelCase ( input_a , input_b ):
    """AND gate: return 1 only when both inputs are non-zero, else 0.

    Fixes the corrupted original, which duplicated the parameter name (a
    SyntaxError) and therefore tested the same input twice.
    """
    return int((input_a, input_b).count(0 ) == 0 )
def UpperCamelCase ( ):
    '''Smoke-test the AND gate over its full truth table.

    NOTE(review): ``and_gate`` is not defined under that name in this file as
    it stands (the gate above was renamed by obfuscation), so calling this
    raises NameError — confirm the intended target.
    '''
    assert and_gate(0 , 0 ) == 0
    assert and_gate(0 , 1 ) == 0
    assert and_gate(1 , 0 ) == 0
    assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
    # NOTE(review): ``test_and_gate`` / ``and_gate`` are not defined under these
    # names in this file (both were obfuscated to ``UpperCamelCase``) — confirm
    # the intended targets before running.
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
| 633 | 0 |
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
# Type aliases for per-feature numpy dicts and the picometre -> angstrom scale
# factor (all three bindings share the obfuscated name ``lowercase__``, so the
# later assignments overwrite the earlier ones — originally FeatureDict,
# ModelOutput and PICO_TO_ANGSTROM).
lowercase__ :List[str] = Mapping[str, np.ndarray]
lowercase__ :Optional[int] = Mapping[str, Any] # Is a nested dict.
lowercase__ :Any = 0.01
@dataclasses.dataclass(frozen=SCREAMING_SNAKE_CASE__ )
class lowercase :
    """Protein structure as a frozen dataclass of parallel numpy arrays.

    NOTE(review): every field was obfuscated to the same name ``lowercase_``
    (later annotations overwrite earlier ones) and the ``frozen`` argument
    references the undefined ``SCREAMING_SNAKE_CASE__``; originally the fields
    were atom_positions / aatype / atom_mask / residue_index / b_factors /
    chain_index / remark / parents / parents_chain_index — confirm.
    """
    lowercase_ : np.ndarray # [num_res, num_atom_type, 3]
    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    lowercase_ : np.ndarray # [num_res]
    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    lowercase_ : np.ndarray # [num_res, num_atom_type]
    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    lowercase_ : np.ndarray # [num_res]
    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    lowercase_ : np.ndarray # [num_res, num_atom_type]
    # Chain indices for multi-chain predictions
    lowercase_ : Optional[np.ndarray] =None
    # Optional remark about the protein. Included as a comment in output PDB
    # files
    lowercase_ : Optional[str] =None
    # Templates used to generate this protein (prediction-only)
    lowercase_ : Optional[Sequence[str]] =None
    # Chain corresponding to each parent
    lowercase_ : Optional[Sequence[int]] =None
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Parse a ProteinNet text record into a Protein object.

    NOTE(review): obfuscation-damaged — each result is bound to the throwaway
    local ``lowercase`` and then read back under its original name (``tags``,
    ``groups``, ``seq``, ``tertiary`` …), and ``_SCREAMING_SNAKE_CASE`` /
    ``np.intaa`` / ``np.floataa`` are mangled identifiers (likely int32 /
    float32).  Kept byte-for-byte as reference only; not runnable as-is.
    '''
    # Section tags look like "[PRIMARY]\n"; split the record on them.
    lowercase = R'''(\[[A-Z]+\]\n)'''
    lowercase = [tag.strip() for tag in re.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) > 0]
    lowercase = zip(tags[0::2] , [l.split('''\n''' ) for l in tags[1::2]] )
    lowercase = ['''N''', '''CA''', '''C''']
    lowercase = None
    lowercase = None
    lowercase = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            # Amino-acid sequence; unknown residues are mapped to 'X'.
            lowercase = g[1][0].strip()
            for i in range(len(_SCREAMING_SNAKE_CASE ) ):
                if seq[i] not in residue_constants.restypes:
                    lowercase = '''X''' # FIXME: strings are immutable
            lowercase = np.array(
                [residue_constants.restype_order.get(_SCREAMING_SNAKE_CASE , residue_constants.restype_num ) for res_symbol in seq] )
        elif "[TERTIARY]" == g[0]:
            # Backbone coordinates, one list per axis, in picometres.
            lowercase = []
            for axis in range(3 ):
                tertiary.append(list(map(_SCREAMING_SNAKE_CASE , g[1][axis].split() ) ) )
            lowercase = np.array(_SCREAMING_SNAKE_CASE )
            lowercase = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
            for i, atom in enumerate(_SCREAMING_SNAKE_CASE ):
                lowercase = np.transpose(tertiary_np[:, i::3] )
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            # '-'/'+' per residue; expand to a per-atom mask over N/CA/C.
            lowercase = np.array(list(map({'''-''': 0, '''+''': 1}.get , g[1][0].strip() ) ) )
            lowercase = np.zeros(
                (
                    len(_SCREAMING_SNAKE_CASE ),
                    residue_constants.atom_type_num,
                ) ).astype(np.floataa )
            for i, atom in enumerate(_SCREAMING_SNAKE_CASE ):
                lowercase = 1
            atom_mask *= mask[..., None]
    assert aatype is not None
    return Protein(
        atom_positions=_SCREAMING_SNAKE_CASE , atom_mask=_SCREAMING_SNAKE_CASE , aatype=_SCREAMING_SNAKE_CASE , residue_index=np.arange(len(_SCREAMING_SNAKE_CASE ) ) , b_factors=_SCREAMING_SNAKE_CASE , )
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ = 0 ):
'''simple docstring'''
lowercase = []
lowercase = prot.remark
if remark is not None:
pdb_headers.append(f'REMARK {remark}' )
lowercase = prot.parents
lowercase = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
lowercase = [p for i, p in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if i == chain_id]
if parents is None or len(_SCREAMING_SNAKE_CASE ) == 0:
lowercase = ['''N/A''']
pdb_headers.append(f'PARENT {" ".join(_SCREAMING_SNAKE_CASE )}' )
return pdb_headers
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
    '''Re-insert REMARK/PARENT header lines into an existing PDB string, with a
    fresh PARENT line at the start of each chain (after every TER record).

    NOTE(review): obfuscation-damaged — both parameters share one name (a
    SyntaxError), locals are bound to ``lowercase`` and read back as
    ``out_pdb_lines`` / ``lines`` / ``parent_dict`` …, and helper arguments go
    through the undefined ``_SCREAMING_SNAKE_CASE``.  Reference only.
    '''
    lowercase = []
    lowercase = pdb_str.split('''\n''' )
    lowercase = prot.remark
    if remark is not None:
        out_pdb_lines.append(f'REMARK {remark}' )
    lowercase = 42
    if prot.parents is not None and len(prot.parents ) > 0:
        # Group parents per chain index when the indices are available.
        lowercase = []
        if prot.parents_chain_index is not None:
            lowercase = {}
            for p, i in zip(prot.parents , prot.parents_chain_index ):
                parent_dict.setdefault(str(_SCREAMING_SNAKE_CASE ) , [] )
                parent_dict[str(_SCREAMING_SNAKE_CASE )].append(_SCREAMING_SNAKE_CASE )
            lowercase = max([int(_SCREAMING_SNAKE_CASE ) for chain_idx in parent_dict] )
            for i in range(max_idx + 1 ):
                lowercase = parent_dict.get(str(_SCREAMING_SNAKE_CASE ) , ['''N/A'''] )
                parents_per_chain.append(_SCREAMING_SNAKE_CASE )
        else:
            parents_per_chain.append(list(prot.parents ) )
    else:
        lowercase = [['''N/A''']]
    def make_parent_line(lowerCAmelCase__ ) -> str:
        # Format one chain's parents as a single PARENT record.
        return f'PARENT {" ".join(_SCREAMING_SNAKE_CASE )}'
    out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
    lowercase = 0
    for i, l in enumerate(_SCREAMING_SNAKE_CASE ):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(_SCREAMING_SNAKE_CASE )
        if "TER" in l and "END" not in lines[i + 1]:
            # A new chain starts after TER: emit its PARENT line.
            chain_counter += 1
            if not chain_counter >= len(_SCREAMING_SNAKE_CASE ):
                lowercase = parents_per_chain[chain_counter]
            else:
                lowercase = ['''N/A''']
            out_pdb_lines.append(make_parent_line(_SCREAMING_SNAKE_CASE ) )
    return "\n".join(_SCREAMING_SNAKE_CASE )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Render a Protein as PDB-format text (headers, ATOM records, TER per chain,
    END).

    NOTE(review): obfuscation-damaged — results are bound to ``lowercase`` and
    read back under their original names (``pdb_lines``, ``atom_mask``,
    ``aatype`` …) and arguments go through the undefined
    ``_SCREAMING_SNAKE_CASE``; ``np.intaa`` is a mangled dtype (likely int32).
    Kept byte-for-byte as reference only.
    '''
    lowercase = residue_constants.restypes + ['''X''']
    def res_atoa(lowerCAmelCase__ ) -> str:
        # Map a residue-type index to its 3-letter PDB name ('UNK' fallback).
        return residue_constants.restype_atoa.get(restypes[r] , '''UNK''' )
    lowercase = residue_constants.atom_types
    lowercase = []
    lowercase = prot.atom_mask
    lowercase = prot.aatype
    lowercase = prot.atom_positions
    lowercase = prot.residue_index.astype(np.intaa )
    lowercase = prot.b_factors
    lowercase = prot.chain_index
    if np.any(aatype > residue_constants.restype_num ):
        raise ValueError('''Invalid aatypes.''' )
    lowercase = get_pdb_headers(_SCREAMING_SNAKE_CASE )
    if len(_SCREAMING_SNAKE_CASE ) > 0:
        pdb_lines.extend(_SCREAMING_SNAKE_CASE )
    lowercase = aatype.shape[0]
    lowercase = 1
    lowercase = 0
    lowercase = string.ascii_uppercase
    lowercase = None
    # Add all atom sites.
    for i in range(_SCREAMING_SNAKE_CASE ):
        lowercase = res_atoa(aatype[i] )
        for atom_name, pos, mask, b_factor in zip(_SCREAMING_SNAKE_CASE , atom_positions[i] , atom_mask[i] , b_factors[i] ):
            if mask < 0.5:
                continue
            lowercase = '''ATOM'''
            lowercase = atom_name if len(_SCREAMING_SNAKE_CASE ) == 4 else f' {atom_name}'
            lowercase = ''''''
            lowercase = ''''''
            lowercase = 1.00
            lowercase = atom_name[0] # Protein supports only C, N, O, S, this works.
            lowercase = ''''''
            lowercase = '''A'''
            if chain_index is not None:
                lowercase = chain_tags[chain_index[i]]
            # PDB is a columnar format, every space matters here!
            lowercase = (
                f'{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}'
                f'{res_name_a:>3} {chain_tag:>1}'
                f'{residue_index[i]:>4}{insertion_code:>1} '
                f'{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}'
                f'{occupancy:>6.2f}{b_factor:>6.2f} '
                f'{element:>2}{charge:>2}'
            )
            pdb_lines.append(_SCREAMING_SNAKE_CASE )
            atom_index += 1
        lowercase = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                lowercase = True
                lowercase = chain_index[i + 1]
        if should_terminate:
            # Close the chain.
            lowercase = '''TER'''
            lowercase = (
                f'{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}'
            )
            pdb_lines.append(_SCREAMING_SNAKE_CASE )
            atom_index += 1
            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
    pdb_lines.append('''END''' )
    pdb_lines.append('''''' )
    return "\n".join(_SCREAMING_SNAKE_CASE )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Return the per-residue standard (ideal) atom-existence mask for the
    protein's amino-acid types.

    NOTE(review): the body reads ``prot`` while the parameter was obfuscated to
    ``lowerCAmelCase__`` — NameError as-is; originally ``(prot)``.
    '''
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , ):
    '''Assemble a Protein from model ``features``/``result`` dicts.

    NOTE(review): obfuscation gave every parameter the same name (a
    SyntaxError) and routed several keyword values through the undefined
    ``_SCREAMING_SNAKE_CASE``; the original signature was (features, result,
    b_factors=None, chain_index=None, remark=None, parents=None,
    parents_chain_index=None) — confirm before relying on this.
    '''
    return Protein(
        aatype=features['''aatype'''] , atom_positions=result['''final_atom_positions'''] , atom_mask=result['''final_atom_mask'''] , residue_index=features['''residue_index'''] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result['''final_atom_mask'''] ) , chain_index=_SCREAMING_SNAKE_CASE , remark=_SCREAMING_SNAKE_CASE , parents=_SCREAMING_SNAKE_CASE , parents_chain_index=_SCREAMING_SNAKE_CASE , )
| 701 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import scaffold for the BioGPT subpackage.
# NOTE(review): obfuscation-damaged — the import-structure dict is bound to
# ``lowercase__`` but ``_LazyModule`` below receives the undefined name
# ``_import_structure``; confirm before relying on this module.
lowercase__ :Tuple = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}
# Only register the torch model symbols when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase__ :Union[str, Any] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]
# Static type-checkers see the real imports; at runtime the module is lazy.
if TYPE_CHECKING:
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )
else:
    import sys
    lowercase__ :Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 633 | 0 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def UpperCamelCase ( ):
    """Build and run the TPU launcher's CLI parser.

    Returns the parsed namespace with ``num_cores`` (int), ``training_script``
    (str) and the remainder ``training_script_args``.  Fixes the corrupted
    original, which bound the parser to a throwaway local ``lowercase`` and
    passed the undefined ``lowercase__`` as the argument ``type`` (restored to
    ``int`` / ``str``).
    """
    parser = ArgumentParser(
        description=(
            '''PyTorch TPU distributed training launch '''
            '''helper utility that will spawn up '''
            '''multiple distributed processes'''
        ) )
    # Optional arguments for the launch helper
    parser.add_argument('''--num_cores''' , type=int , default=1 , help='''Number of TPU cores to use (1 or 8).''' )
    # positional
    parser.add_argument(
        '''training_script''' , type=str , help=(
            '''The full path to the single TPU training '''
            '''program/script to be launched in parallel, '''
            '''followed by all the arguments for the '''
            '''training script'''
        ) , )
    # rest from the training program
    parser.add_argument('''training_script_args''' , nargs=REMAINDER )
    return parser.parse_args()
def UpperCamelCase ( ):
    """Spawn the user's training script across TPU cores via ``xmp.spawn``.

    Fixes the corrupted original, in which every result (including the
    ``sys.argv`` patch) was bound to a throwaway local ``lowercase``.
    """
    # NOTE(review): ``parse_args`` is defined above under the obfuscated name
    # ``UpperCamelCase`` — confirm the intended target before running.
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv so the spawned script sees its own CLI plus the core count.
    sys.argv = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )]
    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
    # NOTE(review): ``main`` is not defined under that name here (obfuscated to
    # ``UpperCamelCase`` above) — confirm before running.
    main()
| 702 |
import logging
from transformers import PretrainedConfig
# Module logger and checkpoint -> remote config URL map (both bound to the
# obfuscated name ``lowercase__``, so the second overwrites the first).
lowercase__ :int = logging.getLogger(__name__)
lowercase__ :Dict = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}
class lowercase ( SCREAMING_SNAKE_CASE__ ):
    """Configuration for the BertAbs summarization model (encoder/decoder sizes
    and dropouts).

    Fixes the corrupted original, which bound every hyper-parameter to a
    throwaway local ``lowercase`` so no attribute was ever stored on the
    config instance.
    """
    # Model-type tag (kept under its obfuscated attribute name).
    lowercase_ : Optional[int] ='''bertabs'''

    def __init__(
        self,
        vocab_size=3_0_5_2_2,
        max_pos=5_1_2,
        enc_layers=6,
        enc_hidden_size=5_1_2,
        enc_heads=8,
        enc_ff_size=5_1_2,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=7_6_8,
        dec_heads=8,
        dec_ff_size=2_0_4_8,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 633 | 0 |
from collections.abc import Sequence
from queue import Queue
class lowercase :
    """Node of a segment tree: covers the inclusive range [start, end] and
    stores the aggregate ``val`` for that range.

    Fixes the corrupted original, which duplicated the ``__init__`` parameter
    names (a SyntaxError) and dropped every ``self.`` assignment, so
    ``__repr__`` and the tree code could never see the values.
    """

    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        # Midpoint used to route queries/updates to the child covering index i.
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f'SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'
class lowercase :
    """Segment tree supporting point update and range query with an arbitrary
    associative function (e.g. ``operator.add``, ``max``, ``min``).

    Fixes the corrupted original: attributes were bound to a throwaway local
    ``lowercase`` and all public methods were collapsed to the single name
    ``A__``; the names ``update`` / ``query_range`` / ``traverse`` are restored
    as used by the demo block at the bottom of this file.
    """

    def __init__(self, collection, function):
        self.collection = collection
        self.fn = function
        # Only build the tree for a non-empty collection; ``self.root`` stays
        # unset otherwise (matching the original behaviour).
        if self.collection:
            self.root = self._build_tree(0, len(self.collection) - 1)

    def update(self, i, val):
        """Set element ``i`` to ``val`` and refresh aggregates on the path."""
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        """Return the fn-aggregate of the inclusive index range [i, j]."""
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        # NOTE(review): ``SegmentTreeNode`` is not defined under that name in
        # this file as it stands (the node class above was renamed by
        # obfuscation) — confirm the intended target.
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        """Yield the tree's nodes in breadth-first order."""
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)
if __name__ == "__main__":
    import operator
    # Demo: build a segment tree over the same data with add/max/min, print a
    # BFS traversal, do a point update, then answer range queries.
    # NOTE(review): the tree is assigned to ``lowercase__`` but used as ``arr``,
    # and ``SegmentTree`` is not defined under that name (obfuscated above) —
    # confirm the intended targets before running.
    for fn in [operator.add, max, min]:
        print("*" * 50)
        lowercase__ :Any = SegmentTree([2, 1, 5, 3, 4], fn)
        for node in arr.traverse():
            print(node)
        print()
        arr.update(1, 5)
        for node in arr.traverse():
            print(node)
        print()
        print(arr.query_range(3, 4)) # 7
        print(arr.query_range(2, 2)) # 5
        print(arr.query_range(1, 3)) # 13
        print()
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
# Deprecation shim: this module only re-exports the diffusers pipeline and
# warns callers to import it from diffusers directly.
warnings.warn(
    "The `inpainting.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionInpaintPipeline` instead."
)
| 633 | 0 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
# Module-level logger (``lowercase__`` is an obfuscated name; originally ``logger``).
lowercase__ :Optional[int] = logging.get_logger(__name__)
class lowercase ( _UpperCAmelCase ):
    """MobileViT-style image processor: shortest-edge resize, center-crop,
    rescale and RGB->BGR channel flip, plus semantic-segmentation
    post-processing.

    NOTE(review): obfuscation-damaged — every method is named ``A__`` with
    duplicated parameter names (SyntaxErrors), and config values are bound to
    a throwaway local ``lowercase`` instead of ``self`` attributes.  Kept
    byte-for-byte as reference only; not runnable as-is.
    """
    lowercase_ : Union[str, Any] =["""pixel_values"""]
    def __init__( self ,A__ = True ,A__ = None ,A__ = PILImageResampling.BILINEAR ,A__ = True ,A__ = 1 / 2_5_5 ,A__ = True ,A__ = None ,A__ = True ,**A__ ,):
        # Defaults: 224 shortest edge for resize, 256x256 center crop.
        super().__init__(**lowercase__)
        lowercase = size if size is not None else {"""shortest_edge""": 2_2_4}
        lowercase = get_size_dict(lowercase__ ,default_to_square=lowercase__)
        lowercase = crop_size if crop_size is not None else {"""height""": 2_5_6, """width""": 2_5_6}
        lowercase = get_size_dict(lowercase__ ,param_name='''crop_size''')
        lowercase = do_resize
        lowercase = size
        lowercase = resample
        lowercase = do_rescale
        lowercase = rescale_factor
        lowercase = do_center_crop
        lowercase = crop_size
        lowercase = do_flip_channel_order
    def A__ ( self ,A__ ,A__ ,A__ = PIL.Image.BILINEAR ,A__ = None ,**A__ ,):
        # Resize so the shortest edge matches size['shortest_edge'].
        lowercase = get_size_dict(lowercase__ ,default_to_square=lowercase__)
        if "shortest_edge" not in size:
            raise ValueError(f'The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}')
        lowercase = get_resize_output_image_size(lowercase__ ,size=size['''shortest_edge'''] ,default_to_square=lowercase__)
        return resize(lowercase__ ,size=lowercase__ ,resample=lowercase__ ,data_format=lowercase__ ,**lowercase__)
    def A__ ( self ,A__ ,A__ ,A__ = None ,**A__ ,):
        # Center-crop to size['height'] x size['width'].
        lowercase = get_size_dict(lowercase__)
        if "height" not in size or "width" not in size:
            raise ValueError(f'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}')
        return center_crop(lowercase__ ,size=(size['''height'''], size['''width''']) ,data_format=lowercase__ ,**lowercase__)
    def A__ ( self ,A__ ,A__ ,A__ = None ,**A__ ,):
        # Multiply pixel values by the rescale factor (typically 1/255).
        return rescale(lowercase__ ,scale=lowercase__ ,data_format=lowercase__ ,**lowercase__)
    def A__ ( self ,A__ ,A__ = None):
        # Swap channel order (RGB <-> BGR).
        return flip_channel_order(lowercase__ ,data_format=lowercase__)
    def A__ ( self ,A__ ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = ChannelDimension.FIRST ,**A__ ,):
        # Full preprocessing entry point: validate args, then apply the
        # configured transforms to every image and batch the pixel values.
        lowercase = do_resize if do_resize is not None else self.do_resize
        lowercase = resample if resample is not None else self.resample
        lowercase = do_rescale if do_rescale is not None else self.do_rescale
        lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor
        lowercase = do_center_crop if do_center_crop is not None else self.do_center_crop
        lowercase = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        lowercase = size if size is not None else self.size
        lowercase = get_size_dict(lowercase__ ,default_to_square=lowercase__)
        lowercase = crop_size if crop_size is not None else self.crop_size
        lowercase = get_size_dict(lowercase__ ,param_name='''crop_size''')
        lowercase = make_list_of_images(lowercase__)
        if not valid_images(lowercase__):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''')
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''')
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''')
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''')
        # All transformations expect numpy arrays.
        lowercase = [to_numpy_array(lowercase__) for image in images]
        if do_resize:
            lowercase = [self.resize(image=lowercase__ ,size=lowercase__ ,resample=lowercase__) for image in images]
        if do_center_crop:
            lowercase = [self.center_crop(image=lowercase__ ,size=lowercase__) for image in images]
        if do_rescale:
            lowercase = [self.rescale(image=lowercase__ ,scale=lowercase__) for image in images]
        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            lowercase = [self.flip_channel_order(image=lowercase__) for image in images]
        lowercase = [to_channel_dimension_format(lowercase__ ,lowercase__) for image in images]
        lowercase = {"""pixel_values""": images}
        return BatchFeature(data=lowercase__ ,tensor_type=lowercase__)
    def A__ ( self ,A__ ,A__ = None):
        # Turn model logits into per-image segmentation maps, optionally
        # resized to the requested target sizes.
        lowercase = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(lowercase__) != len(lowercase__):
                raise ValueError(
                    '''Make sure that you pass in as many target sizes as the batch dimension of the logits''')
            if is_torch_tensor(lowercase__):
                lowercase = target_sizes.numpy()
            lowercase = []
            for idx in range(len(lowercase__)):
                lowercase = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0) ,size=target_sizes[idx] ,mode='''bilinear''' ,align_corners=lowercase__)
                lowercase = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(lowercase__)
        else:
            lowercase = logits.argmax(dim=1)
            lowercase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
| 704 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
# Version / parallelism constant noted by the original authors (obfuscated name).
lowercase__ :Optional[Any] = "0.12" # assumed parallelism: 8
if is_torch_available():
    import torch
def UpperCamelCase ( shape , vocab_size , rng=None ):
    """Create a random integer array of ``shape`` with values in [0, vocab_size).

    Fixes the corrupted original, which reused one parameter name three times
    (a SyntaxError) and bound every result to a throwaway local; the
    conventional ``ids_tensor(shape, vocab_size, rng)`` layout is restored.
    ``jnp.intaa`` in the corrupted source is the mangled spelling of
    ``jnp.int32``.
    """
    if rng is None:
        rng = random.Random()
    # Total number of elements to draw.
    total_dims = 1
    for dim in shape:
        total_dims *= dim
    values = []
    for _ in range(total_dims ):
        values.append(rng.randint(0 , vocab_size - 1 ) )
    output = np.array(values , dtype=jnp.int32 ).reshape(shape )
    return output
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__=None ):
    '''Build a random 0/1 attention mask of the given shape.

    NOTE(review): obfuscation-damaged — both parameters share one name (a
    SyntaxError), ``ids_tensor`` is not defined under that name in this file,
    and the mask is bound to ``lowercase`` but returned as ``attn_mask``.
    Originally: (shape, rng=None) with the last column forced to 1.
    '''
    lowercase = ids_tensor(lowerCAmelCase__ , vocab_size=2 , rng=lowerCAmelCase__ )
    # make sure that at least one token is attended to for each batch
    lowercase = 1
    return attn_mask
@require_flax
class lowercase :
    """Mixin of generation tests shared by Flax causal-LM model testers
    (greedy/sample search, beam search, attention-mask padding, PT/Flax
    equivalence, jitted-vs-eager equivalence).

    NOTE(review): obfuscation-damaged — every test method is named ``A__`` (so
    only the last definition survives on the class) and locals are collapsed
    onto ``lowercase`` tuples read back under their original names.  Kept
    byte-for-byte as reference only; not runnable as-is.
    """
    lowercase_ : Any =None
    lowercase_ : List[str] =()
    def A__ ( self):
        # Shrink the tester's common inputs: batch of 2, half-length
        # sequences, and generate at most 5 new tokens.
        lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        # cut to half length & take max batch_size 3
        lowercase = 2
        lowercase = inputs['''input_ids'''].shape[-1] // 2
        lowercase = inputs['''input_ids'''][:max_batch_size, :sequence_length]
        lowercase = jnp.ones_like(A__)
        lowercase = attention_mask[:max_batch_size, :sequence_length]
        # generate max 5 tokens
        lowercase = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            lowercase = config.eos_token_id
        return config, input_ids, attention_mask, max_length
    @is_pt_flax_cross_test
    def A__ ( self):
        # Greedy generation must match between the Flax model and its PT twin.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        lowercase = False
        lowercase = max_length
        lowercase = 0
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model_class.__name__[4:] # Skip the "Flax" at the beginning
            lowercase = getattr(A__ ,A__)
            lowercase = pt_model_class(A__).eval()
            lowercase = load_flax_weights_in_pytorch_model(A__ ,flax_model.params)
            lowercase = flax_model.generate(A__).sequences
            lowercase = pt_model.generate(torch.tensor(A__ ,dtype=torch.long))
            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                lowercase = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
            self.assertListEqual(pt_generation_outputs.numpy().tolist() ,flax_generation_outputs.tolist())
    def A__ ( self):
        # Greedy search: eager and jitted generate must agree.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        lowercase = False
        lowercase = max_length
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__).sequences
            self.assertEqual(generation_outputs.shape[-1] ,A__)
            lowercase = jit(model.generate)
            lowercase = jit_generate(A__).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
    def A__ ( self):
        # Sampling: eager and jitted generate must agree.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        lowercase = True
        lowercase = max_length
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__).sequences
            self.assertEqual(generation_outputs.shape[-1] ,A__)
            lowercase = jit(model.generate)
            lowercase = jit_generate(A__).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
    def A__ ( self):
        # Beam search: eager and jitted generate must agree.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        lowercase = False
        lowercase = max_length
        lowercase = 2
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__).sequences
            self.assertEqual(generation_outputs.shape[-1] ,A__)
            lowercase = jit(model.generate)
            lowercase = jit_generate(A__).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
    def A__ ( self):
        # Multiple return sequences per input.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        lowercase = False
        lowercase = max_length
        lowercase = 2
        lowercase = 2
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__).sequences
            self.assertEqual(generation_outputs.shape[0] ,input_ids.shape[0] * config.num_return_sequences)
    def A__ ( self):
        # Sampling with temperature/top-k/top-p and forced bos/eos tokens.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        lowercase = True
        lowercase = max_length
        lowercase = 0.8
        lowercase = 1_0
        lowercase = 0.3
        lowercase = 1
        lowercase = 8
        lowercase = 9
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__).sequences
            self.assertEqual(generation_outputs.shape[-1] ,A__)
            lowercase = jit(model.generate)
            lowercase = jit_generate(A__).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
    def A__ ( self):
        # Greedy with forced bos/eos tokens.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        lowercase = max_length
        lowercase = 1
        lowercase = 8
        lowercase = 9
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__).sequences
            self.assertEqual(generation_outputs.shape[-1] ,A__)
            lowercase = jit(model.generate)
            lowercase = jit_generate(A__).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
    def A__ ( self):
        # Beam search with forced bos/eos tokens.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        lowercase = max_length
        lowercase = 2
        lowercase = 1
        lowercase = 8
        lowercase = 9
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__).sequences
            self.assertEqual(generation_outputs.shape[-1] ,A__)
            lowercase = jit(model.generate)
            lowercase = jit_generate(A__).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
    def A__ ( self):
        # Greedy with a left-padded attention mask.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        # pad attention mask on the left
        lowercase = attention_mask.at[(0, 0)].set(0)
        lowercase = False
        lowercase = max_length
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__ ,attention_mask=A__).sequences
            self.assertEqual(generation_outputs.shape[-1] ,A__)
            lowercase = jit(model.generate)
            lowercase = jit_generate(A__ ,attention_mask=A__).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
    def A__ ( self):
        # Sampling with a left-padded attention mask.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        # pad attention mask on the left
        lowercase = attention_mask.at[(0, 0)].set(0)
        lowercase = True
        lowercase = max_length
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__ ,attention_mask=A__).sequences
            self.assertEqual(generation_outputs.shape[-1] ,A__)
            lowercase = jit(model.generate)
            lowercase = jit_generate(A__ ,attention_mask=A__).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
    def A__ ( self):
        # Beam search with a left-padded attention mask.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        # pad attention mask on the left
        lowercase = attention_mask.at[(0, 0)].set(0)
        lowercase = 2
        lowercase = max_length
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__ ,attention_mask=A__).sequences
            self.assertEqual(generation_outputs.shape[-1] ,A__)
            lowercase = jit(model.generate)
            lowercase = jit_generate(A__ ,attention_mask=A__).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
@require_flax
class lowercase ( unittest.TestCase ):
    """Integration check: ``generate`` must reject typo'd / unknown kwargs.

    NOTE(review): obfuscation-damaged — locals (``tokenizer``, ``model`` …)
    are bound to ``lowercase`` and arguments go through the undefined ``A__``;
    reference only.
    """
    def A__ ( self):
        lowercase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-bert''')
        lowercase = FlaxAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''')
        lowercase = '''Hello world'''
        lowercase = tokenizer(A__ ,return_tensors='''np''').input_ids
        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(A__ ,'''do_samples'''):
            model.generate(A__ ,do_samples=A__)
        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(A__ ,'''foo'''):
            lowercase = {'''foo''': '''bar'''}
            model.generate(A__ ,**A__)
| 633 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger for this configuration file.
lowercase__ :str = logging.get_logger(__name__)
# Map of released I-BERT checkpoints to their hosted config.json files.
lowercase__ :List[str] = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": (
        "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
    ),
}
class lowercase ( __SCREAMING_SNAKE_CASE ):
    """Configuration class for I-BERT (integer-only BERT).

    NOTE(review): machine-obfuscated source — ``__init__`` repeats the
    parameter name ``A__`` (a SyntaxError as written) and the ``super()``
    call references undefined ``__snake_case`` names. Kept byte-identical
    (comments only); verify against the upstream `IBertConfig`.
    """
    # Model-type identifier used by the auto-config machinery.
    lowercase_ : Any ='''ibert'''
    def __init__( self ,A__=3_0_5_2_2 ,A__=7_6_8 ,A__=1_2 ,A__=1_2 ,A__=3_0_7_2 ,A__="gelu" ,A__=0.1 ,A__=0.1 ,A__=5_1_2 ,A__=2 ,A__=0.02 ,A__=1E-12 ,A__=1 ,A__=0 ,A__=2 ,A__="absolute" ,A__=False ,A__="none" ,**A__ ,):
        # Forward special-token ids and any extra kwargs to the base config.
        super().__init__(pad_token_id=__snake_case ,bos_token_id=__snake_case ,eos_token_id=__snake_case ,**__snake_case)
        # Transformer architecture and regularization hyper-parameters.
        lowercase = vocab_size
        lowercase = hidden_size
        lowercase = num_hidden_layers
        lowercase = num_attention_heads
        lowercase = hidden_act
        lowercase = intermediate_size
        lowercase = hidden_dropout_prob
        lowercase = attention_probs_dropout_prob
        lowercase = max_position_embeddings
        lowercase = type_vocab_size
        lowercase = initializer_range
        lowercase = layer_norm_eps
        lowercase = position_embedding_type
        # Quantization controls specific to I-BERT.
        lowercase = quant_mode
        lowercase = force_dequant
class lowercase ( __SCREAMING_SNAKE_CASE ):
    @property
    def A__ ( self):
        """Return the ONNX input spec: input name -> {axis index: symbolic axis name}.

        Multiple-choice inputs carry an extra ``choice`` axis between batch and
        sequence; every other task uses (batch, sequence).

        Returns:
            OrderedDict mapping ``input_ids`` and ``attention_mask`` to their
            dynamic-axis dictionaries.
        """
        # Fix: the axis mapping used to be assigned to a throwaway name while
        # the OrderedDict below read the undefined `dynamic_axis` (NameError).
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ])
| 705 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowercase ( SCREAMING_SNAKE_CASE__ ):
    """Scheduler test suite for `UniPCMultistepScheduler`.

    NOTE(review): machine-obfuscated source — every method is named ``A__``,
    every local is ``lowercase`` and several signatures repeat the parameter
    name ``A__`` (a SyntaxError as written). Code is kept byte-identical
    here; only comments were added. Verify behavior against the upstream
    `diffusers` scheduler tests.
    """
    # Scheduler classes under test and the default `generate`-style kwargs.
    lowercase_ : List[str] =(UniPCMultistepScheduler,)
    lowercase_ : Tuple =(('''num_inference_steps''', 25),)
    def A__ ( self ,**A__):
        # Build the default scheduler config; keyword args override entries.
        lowercase = {
            '''num_train_timesteps''': 1_0_0_0,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''solver_order''': 2,
            '''solver_type''': '''bh2''',
        }
        config.update(**A__)
        return config
    def A__ ( self ,A__=0 ,**A__):
        # Save/reload round-trip: a scheduler restored via `from_pretrained`
        # must step identically to the original, given the same past residuals.
        lowercase = dict(self.forward_default_kwargs)
        lowercase = kwargs.pop('''num_inference_steps''' ,A__)
        lowercase = self.dummy_sample
        lowercase = 0.1 * sample
        lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            lowercase = self.get_scheduler_config(**A__)
            lowercase = scheduler_class(**A__)
            scheduler.set_timesteps(A__)
            # copy over dummy past residuals
            lowercase = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(A__)
                lowercase = scheduler_class.from_pretrained(A__)
                new_scheduler.set_timesteps(A__)
                # copy over dummy past residuals
                lowercase = dummy_past_residuals[: new_scheduler.config.solver_order]
            lowercase , lowercase = sample, sample
            for t in range(A__ ,time_step + scheduler.config.solver_order + 1):
                lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
                lowercase = new_scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
                assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
    def A__ ( self ,A__=0 ,**A__):
        # Same save/reload round-trip, but residuals are copied only after
        # `set_timesteps` — the ordering matters for stateful schedulers.
        lowercase = dict(self.forward_default_kwargs)
        lowercase = kwargs.pop('''num_inference_steps''' ,A__)
        lowercase = self.dummy_sample
        lowercase = 0.1 * sample
        lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            lowercase = self.get_scheduler_config()
            lowercase = scheduler_class(**A__)
            scheduler.set_timesteps(A__)
            # copy over dummy past residuals (must be after setting timesteps)
            lowercase = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(A__)
                lowercase = scheduler_class.from_pretrained(A__)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(A__)
                # copy over dummy past residual (must be after setting timesteps)
                lowercase = dummy_past_residuals[: new_scheduler.config.solver_order]
            lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
            lowercase = new_scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
    def A__ ( self ,A__=None ,**A__):
        # Run a full 10-step denoising loop with a dummy model and return the
        # final sample; builds a default scheduler when none is supplied.
        # NOTE(review): the construction below is duplicated outside the `if`
        # in the upstream test as well.
        if scheduler is None:
            lowercase = self.scheduler_classes[0]
            lowercase = self.get_scheduler_config(**A__)
            lowercase = scheduler_class(**A__)
        lowercase = self.scheduler_classes[0]
        lowercase = self.get_scheduler_config(**A__)
        lowercase = scheduler_class(**A__)
        lowercase = 1_0
        lowercase = self.dummy_model()
        lowercase = self.dummy_sample_deter
        scheduler.set_timesteps(A__)
        for i, t in enumerate(scheduler.timesteps):
            lowercase = model(A__ ,A__)
            lowercase = scheduler.step(A__ ,A__ ,A__).prev_sample
        return sample
    def A__ ( self):
        # `step` must preserve the sample's shape at consecutive timesteps.
        lowercase = dict(self.forward_default_kwargs)
        lowercase = kwargs.pop('''num_inference_steps''' ,A__)
        for scheduler_class in self.scheduler_classes:
            lowercase = self.get_scheduler_config()
            lowercase = scheduler_class(**A__)
            lowercase = self.dummy_sample
            lowercase = 0.1 * sample
            if num_inference_steps is not None and hasattr(A__ ,'''set_timesteps'''):
                scheduler.set_timesteps(A__)
            elif num_inference_steps is not None and not hasattr(A__ ,'''set_timesteps'''):
                lowercase = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
            lowercase = dummy_past_residuals[: scheduler.config.solver_order]
            lowercase = scheduler.timesteps[5]
            lowercase = scheduler.timesteps[6]
            lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
            lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
            self.assertEqual(output_a.shape ,sample.shape)
            self.assertEqual(output_a.shape ,output_a.shape)
    def A__ ( self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        lowercase = UniPCMultistepScheduler(**self.get_scheduler_config())
        lowercase = self.full_loop(scheduler=A__)
        lowercase = torch.mean(torch.abs(A__))
        assert abs(result_mean.item() - 0.2464) < 1E-3
        # Round-trip the config through the sibling multistep schedulers and
        # back to UniPC; the full-loop result must be unchanged.
        lowercase = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        lowercase = DEISMultistepScheduler.from_config(scheduler.config)
        lowercase = DPMSolverMultistepScheduler.from_config(scheduler.config)
        lowercase = UniPCMultistepScheduler.from_config(scheduler.config)
        lowercase = self.full_loop(scheduler=A__)
        lowercase = torch.mean(torch.abs(A__))
        assert abs(result_mean.item() - 0.2464) < 1E-3
    def A__ ( self):
        # Config sweep over the number of training timesteps.
        for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=A__)
    def A__ ( self):
        # Thresholding sweep across solver order / type / threshold / prediction type.
        self.check_over_configs(thresholding=A__)
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=A__ ,prediction_type=A__ ,sample_max_value=A__ ,solver_order=A__ ,solver_type=A__ ,)
    def A__ ( self):
        # Prediction-type sweep.
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=A__)
    def A__ ( self):
        # Solver sweep; also run a full loop per combination and check for NaNs.
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=A__ ,solver_type=A__ ,prediction_type=A__ ,)
                    lowercase = self.full_loop(
                        solver_order=A__ ,solver_type=A__ ,prediction_type=A__ ,)
                    assert not torch.isnan(A__).any(), "Samples have nan numbers"
    def A__ ( self):
        # NOTE(review): upstream exercises lower_order_final=True and =False;
        # both values were collapsed to `A__` by the obfuscation.
        self.check_over_configs(lower_order_final=A__)
        self.check_over_configs(lower_order_final=A__)
    def A__ ( self):
        # Forward sweep over inference-step counts.
        for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
            self.check_over_forward(num_inference_steps=A__ ,time_step=0)
    def A__ ( self):
        # Default full loop must land on the known reference mean.
        lowercase = self.full_loop()
        lowercase = torch.mean(torch.abs(A__))
        assert abs(result_mean.item() - 0.2464) < 1E-3
    def A__ ( self):
        # v-prediction full loop has its own reference mean.
        lowercase = self.full_loop(prediction_type='''v_prediction''')
        lowercase = torch.mean(torch.abs(A__))
        assert abs(result_mean.item() - 0.1014) < 1E-3
    def A__ ( self):
        # Half-precision round-trip: samples fed in as fp16 must stay fp16.
        lowercase = self.scheduler_classes[0]
        lowercase = self.get_scheduler_config(thresholding=A__ ,dynamic_thresholding_ratio=0)
        lowercase = scheduler_class(**A__)
        lowercase = 1_0
        lowercase = self.dummy_model()
        lowercase = self.dummy_sample_deter.half()
        scheduler.set_timesteps(A__)
        for i, t in enumerate(scheduler.timesteps):
            lowercase = model(A__ ,A__)
            lowercase = scheduler.step(A__ ,A__ ,A__).prev_sample
        # NOTE(review): `torch.floataa` is obfuscation damage — upstream
        # asserts `torch.float16` here. There is no `torch.floataa` attribute.
        assert sample.dtype == torch.floataa
    def A__ ( self ,**A__):
        # Setting timesteps to the full training count must yield unique steps.
        for scheduler_class in self.scheduler_classes:
            lowercase = self.get_scheduler_config(**A__)
            lowercase = scheduler_class(**A__)
            scheduler.set_timesteps(scheduler.config.num_train_timesteps)
            assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
| 633 | 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowercase ( SCREAMING_SNAKE_CASE__ ):
    """Processor pairing a ViT image processor with a CLIP tokenizer.

    NOTE(review): machine-obfuscated source — ``__init__``/``__call__`` repeat
    the parameter name ``A__`` (a SyntaxError as written) and locals collapse
    to ``lowercase``, so several assignments below have lost their targets.
    Kept byte-identical (comments only); verify against the upstream
    processor implementation.
    """
    # Attributes serialized by ProcessorMixin and the backing classes.
    lowercase_ : Tuple =["image_processor", "tokenizer"]
    lowercase_ : Any ="ViTImageProcessor"
    lowercase_ : str =("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__( self ,A__=None ,A__=None ,**A__):
        # Accept the deprecated `feature_extractor` kwarg as an alias for
        # `image_processor`, warning about its removal in v5.
        lowercase = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' ,A__ ,)
            lowercase = kwargs.pop('''feature_extractor''')
        lowercase = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''')
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''')
        super().__init__(A__ ,A__)
    def __call__( self ,A__=None ,A__=None ,A__=None ,A__=None ,**A__):
        # Dispatch on which of text / visual_prompt / images were supplied and
        # assemble the combined encoding; exactly one prompt type is allowed.
        if text is None and visual_prompt is None and images is None:
            raise ValueError('''You have to specify either text, visual prompt or images.''')
        if text is not None and visual_prompt is not None:
            raise ValueError('''You have to specify exactly one type of prompt. Either text or visual prompt.''')
        if text is not None:
            lowercase = self.tokenizer(A__ ,return_tensors=A__ ,**A__)
        if visual_prompt is not None:
            lowercase = self.image_processor(A__ ,return_tensors=A__ ,**A__)
        if images is not None:
            lowercase = self.image_processor(A__ ,return_tensors=A__ ,**A__)
        if visual_prompt is not None and images is not None:
            # Both prompt and target images: return their pixel tensors together.
            lowercase = {
                '''pixel_values''': image_features.pixel_values,
                '''conditional_pixel_values''': prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            # presumably merges pixel_values into the tokenizer encoding
            # (`encoding["pixel_values"] = ...` upstream) — TODO confirm
            lowercase = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            lowercase = {
                '''conditional_pixel_values''': prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**A__) ,tensor_type=A__)
    def A__ ( self ,*A__ ,**A__):
        # Forwarded to the tokenizer's `batch_decode`.
        return self.tokenizer.batch_decode(*A__ ,**A__)
    def A__ ( self ,*A__ ,**A__):
        # Forwarded to the tokenizer's `decode`.
        return self.tokenizer.decode(*A__ ,**A__)
    @property
    def A__ ( self):
        # Deprecated alias for `image_processor_class`.
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' ,A__ ,)
        return self.image_processor_class
    @property
    def A__ ( self):
        # Deprecated alias for `image_processor`.
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' ,A__ ,)
        return self.image_processor
| 706 |
import argparse
from collections import defaultdict
import yaml
# Path to the English docs table of contents that this script validates.
lowercase__ :Optional[int] = "docs/source/en/_toctree.yml"
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Clean one model-doc section of the toctree.

    Collapses duplicate entries (same ``local`` key) into a single entry —
    raising if their titles disagree — and returns the section sorted
    alphabetically by title (case-insensitive).

    Args:
        lowerCAmelCase__: list of toctree entries, each a dict with at least
            ``local`` (doc path) and ``title`` keys.

    Returns:
        The deduplicated, title-sorted list of entries.

    Raises:
        ValueError: if one ``local`` appears with conflicting titles.
    '''
    # Count how many times each doc path appears.
    # Fixes vs. the original: the defaultdict was seeded with the doc list
    # instead of `int`, the loop iterated an undefined `model_doc`, every
    # local had been collapsed to `lowercase`, and the sort lambda's body
    # read `s` while its parameter had a different name (NameError).
    counts = defaultdict(int )
    for doc in lowerCAmelCase__:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['''title'''] for doc in lowerCAmelCase__ if doc['''local'''] == duplicate_key} )
        if len(titles ) > 1:
            raise ValueError(
                f'{duplicate_key} is present several times in the documentation table of content at '
                '''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
                '''others.''' )
        # Only add this once
        new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
    # Add none duplicate-keys
    new_doc.extend([doc for doc in lowerCAmelCase__ if counts[doc['''local''']] == 1] )
    # Sort
    return sorted(new_doc , key=lambda s : s["title"].lower() )
def UpperCamelCase ( lowerCAmelCase__=False ):
    '''Validate (and optionally rewrite) the model sections of the docs toctree.

    NOTE(review): machine-obfuscated source — every assignment target became
    `lowercase`, the file-path constant is passed through the same collapsed
    name, and ``clean_model_doc_toc`` below is the pre-obfuscation name of
    the sibling cleaning function above. Kept byte-identical (comments only).

    Args:
        lowerCAmelCase__: when truthy, rewrite the toctree file in place
            instead of raising on inconsistencies.
    '''
    with open(lowerCAmelCase__ , encoding='''utf-8''' ) as f:
        lowercase = yaml.safe_load(f.read() )
    # Get to the API doc
    lowercase = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    lowercase = content[api_idx]['''sections''']
    # Then to the model doc
    lowercase = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    lowercase = api_doc[model_idx]['''sections''']
    # Keep only the subsections that themselves contain nested sections
    # (one per modality).
    lowercase = [(idx, section) for idx, section in enumerate(lowerCAmelCase__ ) if '''sections''' in section]
    lowercase = False
    for idx, modality_doc in modalities_docs:
        lowercase = modality_doc['''sections''']
        lowercase = clean_model_doc_toc(lowerCAmelCase__ )
        # Record a diff and, when overwriting, splice the cleaned section back.
        if old_modality_doc != new_modality_doc:
            lowercase = True
            if overwrite:
                lowercase = new_modality_doc
    if diff:
        if overwrite:
            lowercase = model_doc
            lowercase = api_doc
            with open(lowerCAmelCase__ , '''w''' , encoding='''utf-8''' ) as f:
                f.write(yaml.dump(lowerCAmelCase__ , allow_unicode=lowerCAmelCase__ ) )
        else:
            raise ValueError(
                '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
    # CLI entry point: `--fix_and_overwrite` rewrites the toctree in place.
    # NOTE(review): `parser` / `args` were renamed to `lowercase__` by the
    # obfuscation, and `check_model_doc` is the pre-obfuscation name of the
    # checking function above — verify against the original script.
    lowercase__ :Any = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    lowercase__ :int = parser.parse_args()
    check_model_doc(args.fix_and_overwrite)
| 633 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
# Lazy import structure for the Herbert tokenizers: the tokenizers-backed
# fast tokenizer is only exposed when the `tokenizers` package is installed.
lowercase__ :Union[str, Any] = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): upstream this adds a key to the import-structure dict
    # (`_import_structure["tokenization_herbert_fast"] = ...`); obfuscation
    # collapsed both targets to `lowercase__`.
    lowercase__ :Any = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
    # Static type checkers import the real classes eagerly.
    from .tokenization_herbert import HerbertTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_herbert_fast import HerbertTokenizerFast
else:
    import sys
    # At runtime, replace the module with a lazily-loading proxy.
    # NOTE(review): `_import_structure` is the pre-obfuscation name of the
    # dict assigned to `lowercase__` above — verify before relying on this.
    lowercase__ :List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 707 |
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def UpperCamelCase ( func , a , precision = 10**-10 ):
    '''Find a root of ``func`` via the Newton-Raphson method.

    Fixes vs. the original: the three parameters all shared one duplicated
    name (a SyntaxError), and the iterate was never bound to ``x`` — the
    variable that the eval'd expression strings reference.

    Args:
        func: expression string in the variable ``x``, e.g. ``"sin(x)"``.
        a: initial guess for the root.
        precision: stop once ``|func(x)|`` falls below this value.

    Returns:
        float: an approximation of the root of ``func``.
    '''
    x = a
    while True:
        # Newton step: x <- x - f(x) / f'(x). `sympy.diff` differentiates the
        # expression string symbolically; `eval` evaluates both expressions at
        # the current local `x`.
        x = Decimal(x ) - (
            Decimal(eval(func ) ) / Decimal(eval(str(diff(func ) ) ) ) # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func ) ) < precision: # noqa: S307
            return float(x )
# Let's Execute
if __name__ == "__main__":
    # Demo runs of the Newton-Raphson root finder.
    # NOTE(review): `newton_raphson` is the pre-obfuscation name of the
    # function above (renamed to `UpperCamelCase`) — verify before running.
    # Find root of trigonometric function
    # Find value of pi
    print(F'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
    # Find root of polynomial
    print(F'The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}')
    # Find Square Root of 5
    print(F'The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}')
    # Exponential Roots
    print(F'The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}')
| 633 | 0 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase = int(number**0.5 )
return number == sq * sq
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
lowercase = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
lowercase = x_den * y_den * z_den
lowercase = gcd(__UpperCamelCase , __UpperCamelCase )
top //= hcf
bottom //= hcf
return top, bottom
def UpperCamelCase ( lowerCAmelCase__ = 35 ):
'''simple docstring'''
lowercase = set()
lowercase = 42
lowercase = Fraction(0 )
lowercase = 42
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
lowercase = x_num * y_den + x_den * y_num
lowercase = x_den * y_den
lowercase = gcd(__UpperCamelCase , __UpperCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowercase = add_three(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
unique_s.add(__UpperCamelCase )
# n=2
lowercase = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
lowercase = x_den * x_den * y_den * y_den
if is_sq(__UpperCamelCase ) and is_sq(__UpperCamelCase ):
lowercase = int(sqrt(__UpperCamelCase ) )
lowercase = int(sqrt(__UpperCamelCase ) )
lowercase = gcd(__UpperCamelCase , __UpperCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowercase = add_three(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
unique_s.add(__UpperCamelCase )
# n=-1
lowercase = x_num * y_num
lowercase = x_den * y_num + x_num * y_den
lowercase = gcd(__UpperCamelCase , __UpperCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowercase = add_three(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
unique_s.add(__UpperCamelCase )
# n=2
lowercase = x_num * x_num * y_num * y_num
lowercase = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(__UpperCamelCase ) and is_sq(__UpperCamelCase ):
lowercase = int(sqrt(__UpperCamelCase ) )
lowercase = int(sqrt(__UpperCamelCase ) )
lowercase = gcd(__UpperCamelCase , __UpperCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowercase = add_three(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
unique_s.add(__UpperCamelCase )
for num, den in unique_s:
total += Fraction(__UpperCamelCase , __UpperCamelCase )
return total.denominator + total.numerator
if __name__ == "__main__":
print(F'{solution() = }')
| 708 |
from pathlib import Path
import numpy as np
from PIL import Image
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Convert an RGB image to grayscale using the ITU-R BT.601 luma weights.

    Fixes vs. the original: the channel split read an undefined name ``rgb``
    and the weighted sum used undefined ``r``/``g``/``b``; the actual
    parameter is now used and the channels are bound explicitly.

    Args:
        lowerCAmelCase__: ndarray of shape (H, W, 3) with channels in RGB order.

    Returns:
        ndarray of shape (H, W) with the luma of each pixel.
    '''
    r , g , b = lowerCAmelCase__[:, :, 0], lowerCAmelCase__[:, :, 1], lowerCAmelCase__[:, :, 2]
    return 0.29_89 * r + 0.58_70 * g + 0.11_40 * b
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Threshold a grayscale image into a boolean mask.

    Fixes vs. the original: the body read an undefined name ``gray`` instead
    of the function's parameter.

    Args:
        lowerCAmelCase__: grayscale ndarray.

    Returns:
        Boolean ndarray, True where the value lies in (127, 255].
    '''
    return (lowerCAmelCase__ > 127) & (lowerCAmelCase__ <= 255)
def UpperCamelCase ( image , kernel ):
    '''Binary morphological dilation of ``image`` by ``kernel``.

    Fixes vs. the original: both parameters shared one duplicated name (a
    SyntaxError) and every working local (`output`, `image_padded`,
    `summation`) was undefined; names are restored per the upstream
    morphological-dilation algorithm.

    Args:
        image: 2-D binary ndarray (0/1 or bool).
        kernel: 2-D structuring element.

    Returns:
        ndarray of the same shape as ``image``: 1 where the kernel overlaps
        any set pixel, 0 elsewhere.
    '''
    output = np.zeros_like(image )
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
    # Copy image to padded image
    # NOTE(review): this slice follows the upstream algorithm (centers the
    # image for an odd-sized kernel) — the original target was lost to
    # obfuscation; confirm against the source script.
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1] ):
        for y in range(image.shape[0] ):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0 )
    return output
if __name__ == "__main__":
    # Demo: dilate the binarized grayscale Lena image with a 3x3 cross.
    # NOTE(review): `lena_path`, `lena`, `structuring_element`, `output`,
    # `pil_img` and the function names `dilation` / `gray_to_binary` /
    # `rgb_to_gray` are pre-obfuscation names; all assignment targets above
    # were collapsed to `lowercase__`. Verify against the original script.
    # read original image
    lowercase__ :str = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lowercase__ :List[str] = np.array(Image.open(lena_path))
    # kernel to be applied
    lowercase__ :Union[str, Any] = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    lowercase__ :Optional[int] = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    lowercase__ :str = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
| 633 | 0 |
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
# SentencePiece model fixture shared by the tokenizer tests below.
lowercase__ :str = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class lowercase ( _lowerCAmelCase , unittest.TestCase ):
lowercase_ : Any =DebertaVaTokenizer
lowercase_ : Optional[Any] =DebertaVaTokenizerFast
lowercase_ : int =True
lowercase_ : str =True
def A__ ( self):
super().setUp()
# We have a SentencePiece fixture for testing
lowercase = DebertaVaTokenizer(_lowerCAmelCase ,unk_token='''<unk>''')
tokenizer.save_pretrained(self.tmpdirname)
def A__ ( self ,A__):
lowercase = '''this is a test'''
lowercase = '''this is a test'''
return input_text, output_text
def A__ ( self):
lowercase = '''<pad>'''
lowercase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCAmelCase) ,_lowerCAmelCase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCAmelCase) ,_lowerCAmelCase)
def A__ ( self):
lowercase = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] ,'''<pad>''')
self.assertEqual(vocab_keys[1] ,'''<unk>''')
self.assertEqual(vocab_keys[-1] ,'''[PAD]''')
self.assertEqual(len(_lowerCAmelCase) ,3_0_0_0_1)
def A__ ( self):
self.assertEqual(self.get_tokenizer().vocab_size ,3_0_0_0_0)
def A__ ( self):
lowercase = ''' \tHeLLo!how \n Are yoU? '''
lowercase = ['''▁hello''', '''!''', '''how''', '''▁are''', '''▁you''', '''?''']
# fmt: on
lowercase = DebertaVaTokenizer(_lowerCAmelCase ,do_lower_case=_lowerCAmelCase)
lowercase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase))
self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase)
lowercase = DebertaVaTokenizerFast(_lowerCAmelCase ,do_lower_case=_lowerCAmelCase)
lowercase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase))
self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase)
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''')
def A__ ( self):
pass
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''')
def A__ ( self):
pass
def A__ ( self):
lowercase = '''I was born in 92000, and this is falsé.'''
lowercase = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
lowercase = DebertaVaTokenizer(_lowerCAmelCase ,split_by_punct=_lowerCAmelCase)
lowercase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase))
self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase)
lowercase = DebertaVaTokenizerFast(_lowerCAmelCase ,split_by_punct=_lowerCAmelCase)
lowercase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase))
self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase)
def A__ ( self):
lowercase = '''I was born in 92000, and this is falsé.'''
lowercase = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
lowercase = DebertaVaTokenizer(_lowerCAmelCase ,do_lower_case=_lowerCAmelCase ,split_by_punct=_lowerCAmelCase)
lowercase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase))
self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase)
lowercase = DebertaVaTokenizerFast(_lowerCAmelCase ,do_lower_case=_lowerCAmelCase ,split_by_punct=_lowerCAmelCase)
lowercase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase))
self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase)
def A__ ( self):
lowercase = '''I was born in 92000, and this is falsé.'''
lowercase = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
lowercase = DebertaVaTokenizer(_lowerCAmelCase ,do_lower_case=_lowerCAmelCase ,split_by_punct=_lowerCAmelCase)
lowercase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase))
self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase)
lowercase = DebertaVaTokenizerFast(_lowerCAmelCase ,do_lower_case=_lowerCAmelCase ,split_by_punct=_lowerCAmelCase)
lowercase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase))
self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase)
def A__ ( self):
lowercase = '''I was born in 92000, and this is falsé.'''
lowercase = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
lowercase = DebertaVaTokenizer(_lowerCAmelCase ,do_lower_case=_lowerCAmelCase ,split_by_punct=_lowerCAmelCase)
lowercase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase))
self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase)
lowercase = DebertaVaTokenizerFast(_lowerCAmelCase ,do_lower_case=_lowerCAmelCase ,split_by_punct=_lowerCAmelCase)
lowercase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase))
self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase)
def A__ ( self):
lowercase = ''' \tHeLLo!how \n Are yoU? '''
lowercase = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?''']
# fmt: on
lowercase = DebertaVaTokenizer(_lowerCAmelCase ,do_lower_case=_lowerCAmelCase ,split_by_punct=_lowerCAmelCase)
lowercase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase))
self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase)
lowercase = DebertaVaTokenizerFast(_lowerCAmelCase ,do_lower_case=_lowerCAmelCase ,split_by_punct=_lowerCAmelCase)
lowercase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase))
self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase)
def A__ ( self):
lowercase = self.get_tokenizer()
lowercase = self.get_rust_tokenizer()
lowercase = '''I was born in 92000, and this is falsé.'''
lowercase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase))
lowercase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase))
self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase)
lowercase = tokenizer.encode(_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase)
lowercase = rust_tokenizer.encode(_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase)
self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase)
lowercase = self.get_rust_tokenizer()
lowercase = tokenizer.encode(_lowerCAmelCase)
lowercase = rust_tokenizer.encode(_lowerCAmelCase)
self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase)
def A__ ( self):
lowercase = '''This is a test'''
lowercase = [1_3, 1, 4_3_9_8, 2_5, 2_1, 1_2_8_9]
lowercase = ['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test''']
lowercase = ['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test''']
lowercase = DebertaVaTokenizer(_lowerCAmelCase ,keep_accents=_lowerCAmelCase)
lowercase = DebertaVaTokenizerFast(_lowerCAmelCase ,keep_accents=_lowerCAmelCase)
lowercase = tokenizer.encode(_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase)
self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase)
lowercase = tokenizer.tokenize(_lowerCAmelCase)
self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase)
lowercase = tokenizer.convert_ids_to_tokens(_lowerCAmelCase)
self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase)
lowercase = rust_tokenizer.encode(_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase)
self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase)
lowercase = rust_tokenizer.tokenize(_lowerCAmelCase)
self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase)
lowercase = rust_tokenizer.convert_ids_to_tokens(_lowerCAmelCase)
self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase)
# fmt: off
lowercase = '''I was born in 92000, and this is falsé.'''
lowercase = [1_3, 1, 2_3, 3_8_6, 1_9, 5_6_1, 3_0_5_0, 1_5, 1_7, 4_8, 2_5, 8_2_5_6, 1_8, 1, 9]
lowercase = ['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ]
lowercase = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
lowercase = tokenizer.encode(_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase)
self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase)
lowercase = tokenizer.tokenize(_lowerCAmelCase)
self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase)
lowercase = tokenizer.convert_ids_to_tokens(_lowerCAmelCase)
self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase)
lowercase = rust_tokenizer.encode(_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase)
self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase)
lowercase = rust_tokenizer.tokenize(_lowerCAmelCase)
self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase)
lowercase = rust_tokenizer.convert_ids_to_tokens(_lowerCAmelCase)
self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase)
def A__ ( self):
lowercase = DebertaVaTokenizer(_lowerCAmelCase)
lowercase = tokenizer.encode('''sequence builders''')
lowercase = tokenizer.encode('''multi-sequence build''')
lowercase = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase)
lowercase = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase ,_lowerCAmelCase)
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] ,_lowerCAmelCase)
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] ,_lowerCAmelCase ,)
@slow
def A__ ( self):
lowercase = {'''input_ids''': [[1, 3_9_8_6_7, 3_6, 1_9_3_9_0, 4_8_6, 2_7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 6_0_6_8_5, 1_2_2_5, 7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 9_3_6_7, 1_6_8_9_9, 1_8, 1_5_9_3_7, 5_3, 5_9_4, 7_7_3, 1_8, 1_6_2_8_7, 3_0_4_6_5, 3_6, 1_5_9_3_7, 6, 4_1_1_3_9, 3_8, 3_6_9_7_9, 6_0_7_6_3, 1_9_1, 6, 3_4_1_3_2, 9_9, 6, 5_0_5_3_8, 3_9_0, 4_3_2_3_0, 6, 3_4_1_3_2, 2_7_7_9, 2_0_8_5_0, 1_4, 6_9_9, 1_0_7_2, 1_1_9_4, 3_6, 3_8_2, 1_0_9_0_1, 5_3, 7, 6_9_9, 1_0_7_2, 2_0_8_4, 3_6, 2_0_4_2_2, 6_3_0, 5_3, 1_9, 1_0_5, 3_0_4_9, 1_8_9_6, 1_0_5_3, 1_6_8_9_9, 1_5_0_6, 1_1, 3_7_9_7_8, 4_2_4_3, 7, 1_2_3_7, 3_1_8_6_9, 2_0_0, 1_6_5_6_6, 6_5_4, 6, 3_5_0_5_2, 8_1_4_3_6, 7, 5_5_6_3_0, 1_3_5_9_3, 4, 2], [1, 2_6, 1_5_0_1_1, 1_3, 6_6_7, 8, 1_0_5_3, 1_8, 2_3_6_1_1, 1_2_3_7, 7_2_3_5_6, 1_2_8_2_0, 3_4, 1_0_4_1_3_4, 1_2_0_9, 3_5, 1_3_3_1_3, 6_6_2_7, 2_1, 2_0_2, 3_4_7, 7, 1_6_4, 2_3_9_9, 1_1, 4_6, 4_4_8_5, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_2_3_2, 2_8_6_4, 1_5_7_8_5, 1_4_9_5_1, 1_0_5, 5, 8_5_8_1, 1_2_5_0, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowerCAmelCase ,model_name='''microsoft/deberta-v2-xlarge''' ,revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' ,)
| 709 |
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def UpperCamelCase ( lowerCAmelCase__ ):
    """Remove fairseq-only bookkeeping entries from a checkpoint state dict, in place.

    The mangled original referenced the undefined name `state_dict` and used the
    key itself as the `pop` default; both are repaired here.
    """
    state_dict = lowerCAmelCase__
    ignore_keys = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''decoder.output_projection.weight''',
        '''_float_tensor''',
        '''encoder.embed_positions._float_tensor''',
        '''decoder.embed_positions._float_tensor''',
    ]
    for k in ignore_keys:
        # Default of None makes a missing key a no-op instead of a KeyError.
        state_dict.pop(k , None )
def UpperCamelCase ( lowerCAmelCase__ ):
    """Build a bias-free nn.Linear whose weight shares storage with an embedding.

    Used to tie the LM head to the shared token embedding after conversion.
    """
    emb = lowerCAmelCase__
    vocab_size, emb_size = emb.weight.shape
    # No bias: the projection must be exactly the (transposed) embedding lookup.
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    # Share the tensor (no copy) so the head stays tied to the embedding.
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def UpperCamelCase ( lowerCAmelCase__ ):
    """Convert a fairseq M2M-100 checkpoint on disk into a transformers
    MaMaaaForConditionalGeneration model and return it.

    NOTE(review): the helpers `remove_ignore_keys_` / `make_linear_from_emb`
    are defined under the mangled name `UpperCamelCase` in this file — the
    module-wide rename must be restored for these calls to resolve.
    """
    checkpoint_path = lowerCAmelCase__
    mam_aaa = torch.load(checkpoint_path , map_location='''cpu''' )
    # Newer fairseq checkpoints keep the hyper-parameters under cfg["model"].
    args = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model''']
    state_dict = mam_aaa['''model''']
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict['''encoder.embed_tokens.weight'''].shape[0]
    config = MaMaaaConfig(
        vocab_size=vocab_size , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , )
    # Encoder/decoder share their token embedding through the model's `shared`.
    state_dict['''shared.weight'''] = state_dict['''decoder.embed_tokens.weight''']
    model = MaMaaaForConditionalGeneration(config )
    # Non-strict load: the shared/tied weights are populated separately.
    # TODO(review): confirm strictness against the upstream conversion script.
    model.model.load_state_dict(state_dict , strict=False )
    # The LM head is tied to the shared embedding rather than loaded from fairseq.
    model.lm_head = make_linear_from_emb(model.model.shared )
    return model
if __name__ == "__main__":
    # CLI entry point: convert a fairseq M2M-100 checkpoint to transformers format.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    # NOTE(review): fixed the stray `ß` in `args.fairseq_pathß` and the lost
    # `parser`/`model` bindings; the converter above is the last function
    # defined under the mangled name `UpperCamelCase`.
    model = UpperCamelCase(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 633 | 0 |
from math import sqrt
def UpperCamelCase ( lowerCAmelCase__ ):
    """Return the sum of the proper divisors of n (the aliquot sum).

    Pairs each divisor i <= sqrt(n) with its cofactor n // i, counting a
    perfect-square root only once, then subtracts n itself.
    """
    n = lowerCAmelCase__
    total = 0
    root = sqrt(n )
    for i in range(1 , int(root + 1 ) ):
        if n % i == 0 and i != root:
            total += i + n // i
        elif i == root:
            # Perfect square: count the root exactly once.
            total += i
    return total - n
def UpperCamelCase ( lowerCAmelCase__ = 1_0000 ):
    """Project Euler 21: return the sum of all amicable numbers below the limit.

    i is amicable when sum_of_divisors(sum_of_divisors(i)) == i while
    sum_of_divisors(i) != i (perfect numbers are excluded).

    NOTE(review): `sum_of_divisors` is defined under the mangled name
    `UpperCamelCase` in this file — restore distinct names module-wide.
    """
    total = sum(
        i
        for i in range(1 , lowerCAmelCase__ )
        if sum_of_divisors(sum_of_divisors(i ) ) == i and sum_of_divisors(i ) != i )
    return total
# Script entry point: print the Euler-21 answer for the value typed on stdin.
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined under that name in this mangled
    # file (the function above is `UpperCamelCase`) — this raises NameError.
    print(solution(int(str(input()).strip())))
| 710 |
from __future__ import annotations
from random import random
class lowercase :
    """A treap node: binary-search-tree order on `value`, min-heap order on the
    randomly drawn `prior` priority."""

    def __init__( self ,A__ = None):
        # NOTE(review): the mangled original bound the fields to a throwaway
        # local and never set the attributes; restored here.
        self.value = A__
        self.prior = random()
        self.left = None
        self.right = None

    def __repr__( self):
        from pprint import pformat

        if self.left is None and self.right is None:
            return f'\'{self.value}: {self.prior:.5}\''
        else:
            return pformat(
                {f'{self.value}: {self.prior:.5}': (self.left, self.right)} ,indent=1)

    def __str__( self):
        # Pre-order rendering: this value followed by the left then right subtree.
        value = str(self.value) + ''' '''
        left = str(self.left or '''''')
        right = str(self.right or '''''')
        return value + left + right
def UpperCamelCase ( root , value ):
    """Split a treap into (left, right): values < `value` go left, the rest right.

    NOTE(review): the mangled original declared two parameters with the same
    name (a SyntaxError) and referenced undefined `left`/`right`; restored to
    the standard treap split. Recursion goes through a local helper so the fix
    does not depend on module-level names.
    """
    def _split(node):
        if node is None or node.value is None:  # None tree splits into 2 Nones
            return None, None
        if value < node.value:
            # node and its right subtree belong to the right half of the cut.
            left, node.left = _split(node.left )
            return left, node
        node.right, right = _split(node.right )
        return node, right

    return _split(root )
def UpperCamelCase ( left , right ):
    """Merge two treaps where every value in `left` precedes every value in
    `right`; the smaller priority becomes the root (min-heap property).

    NOTE(review): the mangled original had duplicate parameter names (a
    SyntaxError) and discarded the re-attached subtree; restored here with a
    local recursive helper.
    """
    def _merge(l , r ):
        if (not l) or (not r):  # If one node is None, return the other
            return l or r
        if l.prior < r.prior:
            l.right = _merge(l.right , r )
            return l
        r.left = _merge(l , r.left )
        return r

    return _merge(left , right )
def UpperCamelCase ( root , value ):
    """Insert `value` into the treap: split at `value`, then merge
    left + new node + right.

    NOTE(review): the mangled original had duplicate parameter names (a
    SyntaxError) and called the undefined `Node` — the node class in this file
    is named `lowercase`. `split`/`merge` are also mangled to `UpperCamelCase`
    elsewhere; the module-wide rename must be restored for them to resolve.
    """
    node = lowercase(value )
    left, right = split(root , value )
    return merge(merge(left , node ) , right )
def UpperCamelCase ( root , value ):
    """Erase every node whose value equals `value` by cutting the band
    [value, value] out of the treap and merging the remainder.

    NOTE(review): duplicate parameter names (a SyntaxError) and lost bindings
    restored; `split`/`merge` are mangled to `UpperCamelCase` elsewhere in this
    file and need the module-wide rename to resolve.
    """
    left, right = split(root , value - 1 )
    _, right = split(right , value )
    return merge(left , right )
def UpperCamelCase ( lowerCAmelCase__ ):
    """Print the treap's values in sorted (in-order) order, comma-separated,
    with no trailing newline."""
    def _walk(node ):
        if not node:  # None
            return
        _walk(node.left )
        print(node.value , end=''',''' )
        _walk(node.right )

    _walk(lowerCAmelCase__ )
def UpperCamelCase ( root , args ):
    """Apply a whitespace-separated command string to the treap: '+N' inserts
    N, '-N' erases N; anything else prints an error. Returns the new root.

    NOTE(review): duplicate parameter names (a SyntaxError) and the lost
    `root = ...` re-bindings were restored; `insert`/`erase` are mangled to
    `UpperCamelCase` elsewhere in this file and need the module-wide rename.
    """
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root , int(arg[1:] ) )
        elif arg[0] == "-":
            root = erase(root , int(arg[1:] ) )
        else:
            print('''Unknown command''' )
    return root
def UpperCamelCase ( ):
    """Interactive REPL: read command strings, mutate the treap, and print it
    after each line until the user enters 'q'.

    NOTE(review): the mangled original never bound `root`/`args`; restored.
    `interact_treap` is mangled to `UpperCamelCase` elsewhere in this file and
    needs the module-wide rename to resolve.
    """
    root = None
    print(
        '''enter numbers to create a tree, + value to add value into treap, '''
        '''- value to erase all nodes with value. \'q\' to quit. ''' )
    args = input()
    while args != "q":
        root = interact_treap(root , args )
        print(root )
        args = input()
    print('''good by!''' )
# Run the module doctests, then start the interactive treap REPL.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): `main` is not defined under that name in this mangled file
    # (the REPL above is `UpperCamelCase`) — this call raises NameError.
    main()
| 633 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import scaffolding for the XLM-RoBERTa model family: the heavy backends
# (sentencepiece / tokenizers / torch / tf / flax) are only imported on first
# attribute access via _LazyModule.
# NOTE(review): the mangled original bound each piece to the throwaway global
# `lowercase__` but passed the undefined name `_import_structure` to
# _LazyModule, a NameError at import time — the canonical pattern is restored.
_import_structure = {
    "configuration_xlm_roberta": [
        "XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XLMRobertaConfig",
        "XLMRobertaOnnxConfig",
    ],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlm_roberta"] = ["XLMRobertaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlm_roberta_fast"] = ["XLMRobertaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm_roberta"] = [
        "XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMRobertaForCausalLM",
        "XLMRobertaForMaskedLM",
        "XLMRobertaForMultipleChoice",
        "XLMRobertaForQuestionAnswering",
        "XLMRobertaForSequenceClassification",
        "XLMRobertaForTokenClassification",
        "XLMRobertaModel",
        "XLMRobertaPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
        "TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMRobertaForCausalLM",
        "TFXLMRobertaForMaskedLM",
        "TFXLMRobertaForMultipleChoice",
        "TFXLMRobertaForQuestionAnswering",
        "TFXLMRobertaForSequenceClassification",
        "TFXLMRobertaForTokenClassification",
        "TFXLMRobertaModel",
        "TFXLMRobertaPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
        "FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxXLMRobertaForMaskedLM",
        "FlaxXLMRobertaForCausalLM",
        "FlaxXLMRobertaForMultipleChoice",
        "FlaxXLMRobertaForQuestionAnswering",
        "FlaxXLMRobertaForSequenceClassification",
        "FlaxXLMRobertaForTokenClassification",
        "FlaxXLMRobertaModel",
        "FlaxXLMRobertaPreTrainedModel",
    ]

# Static type checkers see the real imports; at runtime the module is replaced
# by a _LazyModule that resolves names on demand.
if TYPE_CHECKING:
    from .configuration_xlm_roberta import (
        XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XLMRobertaConfig,
        XLMRobertaOnnxConfig,
    )

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlm_roberta import XLMRobertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm_roberta import (
            XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMRobertaForCausalLM,
            XLMRobertaForMaskedLM,
            XLMRobertaForMultipleChoice,
            XLMRobertaForQuestionAnswering,
            XLMRobertaForSequenceClassification,
            XLMRobertaForTokenClassification,
            XLMRobertaModel,
            XLMRobertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm_roberta import (
            TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMRobertaForCausalLM,
            TFXLMRobertaForMaskedLM,
            TFXLMRobertaForMultipleChoice,
            TFXLMRobertaForQuestionAnswering,
            TFXLMRobertaForSequenceClassification,
            TFXLMRobertaForTokenClassification,
            TFXLMRobertaModel,
            TFXLMRobertaPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xlm_roberta import (
            FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxXLMRobertaForCausalLM,
            FlaxXLMRobertaForMaskedLM,
            FlaxXLMRobertaForMultipleChoice,
            FlaxXLMRobertaForQuestionAnswering,
            FlaxXLMRobertaForSequenceClassification,
            FlaxXLMRobertaForTokenClassification,
            FlaxXLMRobertaModel,
            FlaxXLMRobertaPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 711 |
def UpperCamelCase ( lowerCAmelCase__ = 1000 ):
    """Generalised Project Euler 9: for perimeter n, return the maximum product
    a*b*c over Pythagorean triples with a + b + c == n, or -1 if none exists.
    """
    n = lowerCAmelCase__
    product = -1
    candidate = 0
    for a in range(1 , n // 3 ):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
# Print the answer for the default perimeter (1000) when run as a script.
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined under that name in this mangled
    # file (the function above is `UpperCamelCase`) — this raises NameError.
    print(F'{solution() = }')
| 633 | 0 |
import os
from datetime import datetime as dt
from github import Github
# Issues carrying any of these (lower-cased) labels are exempt from the
# stale-bot sweep below.
lowercase__ :List[str] = [
    "good first issue",
    "feature request",
    "wip",
]
def UpperCamelCase ( ):
    """Stale-bot for huggingface/accelerate: warn on inactive open issues and
    close issues the bot already warned about that stayed silent for a week.

    Issues whose labels appear in LABELS_TO_EXEMPT are skipped entirely.
    NOTE(review): restored the lambda whose parameter (`lowerCAmelCase__`) did
    not match its body (`i`), the undefined `lowerCamelCase_` references, and
    the lost local bindings.
    """
    g = Github(os.environ['''GITHUB_TOKEN'''] )
    repo = g.get_repo('''huggingface/accelerate''' )
    open_issues = repo.get_issues(state='''open''' )
    for issue in open_issues:
        # Newest comment first.
        comments = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state='''closed''' )
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
        ):
            # Add stale comment
            issue.create_comment(
                '''This issue has been automatically marked as stale because it has not had '''
                '''recent activity. If you think this still needs to be addressed '''
                '''please comment on this thread.\n\nPlease note that issues that do not follow the '''
                '''[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '''
                '''are likely to be ignored.''' )
# Run the stale-bot sweep when executed directly (e.g. from a scheduled CI job).
if __name__ == "__main__":
    # NOTE(review): `main` is not defined under that name in this mangled file
    # (the sweep above is `UpperCamelCase`) — this call raises NameError.
    main()
| 712 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import structure for InstructBLIP: configuration and processing are
# always importable; the torch modelling classes are registered only when
# torch is installed.
# NOTE(review): the mangled original bound the structure to `lowercase__` but
# passed the undefined `_import_structure` to _LazyModule (NameError at import
# time) — the canonical pattern is restored.
_import_structure = {
    "configuration_instructblip": [
        "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InstructBlipConfig",
        "InstructBlipQFormerConfig",
        "InstructBlipVisionConfig",
    ],
    "processing_instructblip": ["InstructBlipProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_instructblip"] = [
        "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InstructBlipQFormerModel",
        "InstructBlipPreTrainedModel",
        "InstructBlipForConditionalGeneration",
        "InstructBlipVisionModel",
    ]

# Static type checkers see the real imports; at runtime the module is replaced
# by a _LazyModule that resolves names on demand.
if TYPE_CHECKING:
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 633 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure for DistilBERT across the torch / tf / flax backends.
# NOTE(review): the mangled original bound the structure to `lowercase__` but
# passed the undefined `_import_structure` to _LazyModule (NameError at import
# time) — the canonical pattern is restored.
_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]

# Static type checkers see the real imports; at runtime the module is replaced
# by a _LazyModule that resolves names on demand.
if TYPE_CHECKING:
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 713 |
from numpy import exp, pi, sqrt
def UpperCamelCase ( x , mu = 0.0 , sigma = 1.0 ):
    """Evaluate the Gaussian (normal) probability density at x for mean `mu`
    and standard deviation `sigma`.

    NOTE(review): the mangled original declared three parameters with the same
    name (a SyntaxError); restored to the conventional names.
    """
    return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
# Run the embedded doctests when the module is executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 633 | 0 |
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowercase__ :Optional[int] = logging.get_logger(__name__)
# NOTE(review): this class is heavily corrupted by the identifier mangling and
# cannot run as written:
#   * the base class `__lowercase` is undefined (original: BaseImageProcessor);
#   * every method is named `A__`, so later defs overwrite earlier ones;
#   * several methods declare duplicate `A__` parameters — a SyntaxError;
#   * bodies reference `__A`, which Python's private-name mangling rewrites to
#     `_lowercase__A` inside this class — a NameError even if parsing passed.
# The shape of the code matches a transformers image processor (resize /
# center-crop / rescale / normalize pipeline; the (256/224) shortest-edge
# scaling suggests the LeViT/DeiT family) — TODO: restore from upstream.
class lowercase ( __lowercase ):
    # Keys this processor produces in its BatchFeature output.
    lowercase_ : List[str] =['pixel_values']

    def __init__( self ,A__ = True ,A__ = None ,A__ = PILImageResampling.BICUBIC ,A__ = True ,A__ = None ,A__ = True ,A__ = 1 / 2_5_5 ,A__ = True ,A__ = IMAGENET_DEFAULT_MEAN ,A__ = IMAGENET_DEFAULT_STD ,**A__ ,):
        # Defaults: 224 shortest-edge resize, 224x224 center crop, 1/255
        # rescale, ImageNet mean/std normalisation.
        super().__init__(**__A)
        lowercase = size if size is not None else {'''shortest_edge''': 2_2_4}
        lowercase = get_size_dict(__A ,default_to_square=__A)
        lowercase = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
        lowercase = get_size_dict(__A ,param_name='''crop_size''')
        lowercase = do_resize
        lowercase = size
        lowercase = resample
        lowercase = do_center_crop
        lowercase = crop_size
        lowercase = do_rescale
        lowercase = rescale_factor
        lowercase = do_normalize
        lowercase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        lowercase = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def A__ ( self ,A__ ,A__ ,A__ = PILImageResampling.BICUBIC ,A__ = None ,**A__ ,):
        # Resize: a "shortest_edge" size is inflated by 256/224 before the
        # aspect-preserving resize; otherwise explicit height/width are used.
        lowercase = get_size_dict(__A ,default_to_square=__A)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            lowercase = int((2_5_6 / 2_2_4) * size['''shortest_edge'''])
            lowercase = get_resize_output_image_size(__A ,size=__A ,default_to_square=__A)
            lowercase = {'''height''': output_size[0], '''width''': output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f'Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}')
        return resize(
            __A ,size=(size_dict['''height'''], size_dict['''width''']) ,resample=__A ,data_format=__A ,**__A)

    def A__ ( self ,A__ ,A__ ,A__ = None ,**A__ ,):
        # Center-crop to the requested height/width.
        lowercase = get_size_dict(__A)
        if "height" not in size or "width" not in size:
            raise ValueError(f'Size dict must have keys \'height\' and \'width\'. Got {size.keys()}')
        return center_crop(__A ,size=(size['''height'''], size['''width''']) ,data_format=__A ,**__A)

    def A__ ( self ,A__ ,A__ ,A__ = None ,**A__ ,):
        # Multiply pixel values by a scalar (typically 1/255).
        return rescale(__A ,scale=__A ,data_format=__A ,**__A)

    def A__ ( self ,A__ ,A__ ,A__ ,A__ = None ,**A__ ,):
        # Channel-wise (x - mean) / std normalisation.
        return normalize(__A ,mean=__A ,std=__A ,data_format=__A ,**__A)

    def A__ ( self ,A__ ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = ChannelDimension.FIRST ,**A__ ,):
        # Full preprocess pipeline: validate inputs, then optionally resize,
        # center-crop, rescale and normalise each image, returning a
        # BatchFeature of channel-first pixel values.
        lowercase = do_resize if do_resize is not None else self.do_resize
        lowercase = resample if resample is not None else self.resample
        lowercase = do_center_crop if do_center_crop is not None else self.do_center_crop
        lowercase = do_rescale if do_rescale is not None else self.do_rescale
        lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor
        lowercase = do_normalize if do_normalize is not None else self.do_normalize
        lowercase = image_mean if image_mean is not None else self.image_mean
        lowercase = image_std if image_std is not None else self.image_std
        lowercase = size if size is not None else self.size
        lowercase = get_size_dict(__A ,default_to_square=__A)
        lowercase = crop_size if crop_size is not None else self.crop_size
        lowercase = get_size_dict(__A ,param_name='''crop_size''')
        lowercase = make_list_of_images(__A)
        if not valid_images(__A):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''')
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''')
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''')
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''')
        # All transformations expect numpy arrays.
        lowercase = [to_numpy_array(__A) for image in images]
        if do_resize:
            lowercase = [self.resize(__A ,__A ,__A) for image in images]
        if do_center_crop:
            lowercase = [self.center_crop(__A ,__A) for image in images]
        if do_rescale:
            lowercase = [self.rescale(__A ,__A) for image in images]
        if do_normalize:
            lowercase = [self.normalize(__A ,__A ,__A) for image in images]
        lowercase = [to_channel_dimension_format(__A ,__A) for image in images]
        lowercase = {'''pixel_values''': images}
        return BatchFeature(data=__A ,tensor_type=__A)
| 714 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def UpperCamelCase ( key , default=False ):
    """Read a boolean flag from environment variable `key`.

    Returns `default` when the variable is unset; otherwise converts the value
    with strtobool (1/0). Raises ValueError for unrecognised values.
    NOTE(review): the mangled original had duplicate parameter names (a
    SyntaxError), converted the key instead of the value, and returned the
    unbound `_value`; all repaired here.
    """
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f'If set, {key} must be yes or no.' )
    return _value
lowercase__ :Dict = parse_flag_from_env("RUN_SLOW", default=False)
# ---------------------------------------------------------------------------
# Test-skipping decorators: each wraps unittest.skip/skipUnless with a
# capability probe so suites degrade gracefully on machines without the
# corresponding hardware or optional dependency.
# NOTE(review): every helper below is mangled to the same name
# `UpperCamelCase`, so at module level only the last definition survives —
# the original distinct names (skip, slow, require_cpu, ...) must be restored.
# ---------------------------------------------------------------------------
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Unconditionally skip the decorated test.'''
    return unittest.skip('''Test was skipped''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Run the decorated test only when the RUN_SLOW env flag is set.'''
    return unittest.skipUnless(_run_slow_tests , '''test is slow''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Run only on CPU-only machines (no CUDA device visible).'''
    return unittest.skipUnless(not torch.cuda.is_available() , '''test requires only a CPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Require at least one CUDA GPU.'''
    return unittest.skipUnless(torch.cuda.is_available() , '''test requires a GPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Require an Intel XPU device.'''
    return unittest.skipUnless(is_xpu_available() , '''test requires a XPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Require the `mps` (Apple Silicon) backend in torch.'''
    return unittest.skipUnless(is_mps_available() , '''test requires a `mps` backend support in `torch`''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Require both transformers and datasets to be installed.'''
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available() , '''test requires the Hugging Face suite''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Require bitsandbytes.'''
    return unittest.skipUnless(is_bnb_available() , '''test requires the bitsandbytes library''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Require a TPU runtime.'''
    return unittest.skipUnless(is_tpu_available() , '''test requires TPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Require exactly one CUDA GPU.'''
    return unittest.skipUnless(torch.cuda.device_count() == 1 , '''test requires a GPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Require exactly one XPU.'''
    return unittest.skipUnless(torch.xpu.device_count() == 1 , '''test requires a XPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Require more than one CUDA GPU.'''
    return unittest.skipUnless(torch.cuda.device_count() > 1 , '''test requires multiple GPUs''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Require more than one XPU.'''
    return unittest.skipUnless(torch.xpu.device_count() > 1 , '''test requires multiple XPUs''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Require safetensors.'''
    return unittest.skipUnless(is_safetensors_available() , '''test requires safetensors''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Require DeepSpeed.'''
    return unittest.skipUnless(is_deepspeed_available() , '''test requires DeepSpeed''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Require torch >= 1.12.0 (fully-shared data parallelism support).'''
    return unittest.skipUnless(is_torch_version('''>=''' , '''1.12.0''' ) , '''test requires torch version >= 1.12.0''' )(lowerCAmelCase__ )
# NOTE(review): the next def declares two parameters with the same name
# (`lowerCAmelCase__`) — a SyntaxError — and its body references the undefined
# names `test_case`/`version`; original signature was (test_case=None, version=None).
def UpperCamelCase ( lowerCAmelCase__=None , lowerCAmelCase__=None ):
    '''Require torch >= the given version; usable bare or with version=... .'''
    if test_case is None:
        return partial(lowerCAmelCase__ , version=lowerCAmelCase__ )
    return unittest.skipUnless(is_torch_version('''>=''' , lowerCAmelCase__ ) , f'test requires torch version >= {version}' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Require TensorBoard.'''
    return unittest.skipUnless(is_tensorboard_available() , '''test requires Tensorboard''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Require Weights & Biases.'''
    return unittest.skipUnless(is_wandb_available() , '''test requires wandb''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Require comet_ml.'''
    return unittest.skipUnless(is_comet_ml_available() , '''test requires comet_ml''' )(lowerCAmelCase__ )
# True when wandb or tensorboard is usable and comet_ml is absent.
# NOTE(review): bound to the throwaway `lowercase__` here but read below as
# `_atleast_one_tracker_available` — the original name must be restored.
lowercase__ :Dict = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Require at least one experiment tracker and no comet_ml install.'''
    return unittest.skipUnless(
        _atleast_one_tracker_available , '''test requires at least one tracker to be available and for `comet_ml` to not be installed''' , )(lowerCAmelCase__ )
# Base TestCase that provides a class-wide temporary directory, cleared
# between tests (files/subdirs removed in setUp when clear_on_setup is True).
# NOTE(review): mangling broke this class — both classmethods are named `A__`
# (the second overwrites the first), and `tempfile.mkdtemp()` is bound to a
# throwaway local instead of `cls.tmpdir`, which teardown/setUp then read.
class lowercase ( unittest.TestCase ):
    # Whether setUp should empty the temp dir before each test.
    lowercase_ : int =True
    @classmethod
    def A__ ( cls):
        # setUpClass: create the shared temporary directory.
        lowercase = tempfile.mkdtemp()
    @classmethod
    def A__ ( cls):
        # tearDownClass: remove the shared temporary directory if it exists.
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)
    def A__ ( self):
        # setUp: empty the temp dir (files first, then directories).
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob('''**/*'''):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    # NOTE(review): `A__` is undefined here — original: `path`.
                    shutil.rmtree(A__)
# TestCase that resets the accelerate process-state singletons after every
# test so state does not leak between test cases.
class lowercase ( unittest.TestCase ):
    def A__ ( self):
        # tearDown hook (mangled name; original was `tearDown`).
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
# TestCase helper that starts a batch of unittest.mock patchers and registers
# their stop() for cleanup.
# NOTE(review): mangling broke this method — the normalised list is bound to a
# throwaway local (and reads the undefined `mocks`) instead of `self.mocks`,
# which the loop below then reads.
class lowercase ( unittest.TestCase ):
    def A__ ( self ,A__):
        # Accept a single mock or a tuple/list of mocks.
        lowercase = mocks if isinstance(A__ ,(tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def UpperCamelCase ( lowerCAmelCase__ ):
    """Return True when `tensor` holds the same values on every process.

    Gathers the tensor across processes and compares each gathered copy to the
    local one. NOTE(review): the mangled original referenced the undefined
    names `tensor`/`state`/`tensors`; bindings restored here.
    """
    tensor = lowerCAmelCase__
    state = AcceleratorState()
    # Add a leading batch dim so gather stacks one row per process.
    tensor = tensor[None].clone().to(state.device )
    tensors = gather(tensor ).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0] ):
        if not torch.equal(tensors[i] , tensor ):
            return False
    return True
class lowercase :
    """Result of a finished subprocess: exit code plus the captured stdout and
    stderr lines.

    NOTE(review): the mangled original declared three parameters all named
    `A__` (a SyntaxError) and never assigned the attributes; restored here.
    """

    def __init__( self ,returncode ,stdout ,stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def UpperCamelCase ( stream , callback ):
    """Forward every line read from the async `stream` to `callback` until EOF
    (readline returns an empty bytes object).

    NOTE(review): the mangled original declared both parameters with the same
    name (a SyntaxError); names restored from the body's usage.
    """
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def UpperCamelCase ( cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False ):
    '''Run *cmd* as a subprocess, streaming its stdout/stderr line-by-line.

    Returns a _RunOutput(returncode, stdout_lines, stderr_lines).  The mangled
    signature (and the nested ``tee``) repeated parameter names — a SyntaxError
    as written; the names the bodies reference are restored.
    '''
    if echo:
        print('''\nRunning: ''' , ''' '''.join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line , sink , pipe , label=""):
        # Decode, record, and (unless quiet) echo one line of child output.
        line = line.decode('''utf-8''').rstrip()
        sink.append(line)
        if not quiet:
            print(label , line , file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout , lambda line: tee(line , out , sys.stdout , label='''stdout:'''))),
            asyncio.create_task(_read_stream(p.stderr , lambda line: tee(line , err , sys.stderr , label='''stderr:'''))),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err)
def UpperCamelCase ( cmd , env=None , stdin=None , timeout=180 , quiet=False , echo=True ):
    '''Synchronously run *cmd* via _stream_subprocess and fail loudly on error.

    Raises RuntimeError with the combined stderr when the child exits non-zero.
    The mangled signature repeated parameter names (a SyntaxError as written);
    the names the body references (``result``, ``cmd_str``, ``stderr``) and the
    keyword arguments forwarded to _stream_subprocess are restored.
    '''
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo))
    cmd_str = ''' '''.join(cmd)
    if result.returncode > 0:
        stderr = '''\n'''.join(result.stderr)
        raise RuntimeError(
            f'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
            f'The combined stderr from workers follows:\n{stderr}')
    return result
class lowercase ( Exception ):
    """Raised when a subprocess run by the helpers below exits non-zero.

    NOTE(review): the mangled base class was the undefined name
    ``SCREAMING_SNAKE_CASE__``; instances of this class are ``raise``d, so the
    base must be ``Exception``.
    """

    pass
def UpperCamelCase ( command , return_stdout=False ):
    '''Run *command* (an argv list) and optionally return its decoded stdout.

    Wraps subprocess.CalledProcessError in SubprocessCallException so the
    child's captured output appears in the message.  The mangled signature
    repeated one parameter name (a SyntaxError as written); restored the
    ``command``/``output`` names the body references.
    '''
    try:
        output = subprocess.check_output(command , stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output , '''decode'''):
                output = output.decode('''utf-8''')
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f'Command `{" ".join(command)}` failed with the following error:\n\n{e.output.decode()}') from e
| 633 | 0 |
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class lowercase ( UpperCamelCase__ ):
    """Round-trip tests for RagTokenizer (DPR question encoder + BART generator).

    NOTE(review): heavily name-mangled — every method is named ``A__`` so later
    definitions silently shadow earlier ones; the base ``UpperCamelCase__`` is
    an unresolved name; ``__A`` (name-mangled inside the class) is undefined
    everywhere it appears; and locals are bound to the throwaway name
    ``lowercase`` while later lines read the intended names
    (``self.tmpdirname``, ``vocab_tokens``, ``rag_config`` …).  Code left
    unchanged; comments only.  Intended method names (from the bodies):
    setUp, get_dpr_tokenizer, get_bart_tokenizer, tearDown and three test
    methods — confirm against the upstream transformers test before fixing.
    """

    # setUp: write a throwaway DPR (WordPiece) vocab and a BART (BPE)
    # vocab+merges under a fresh temp dir for the tokenizers to load.
    def A__ ( self):
        lowercase = tempfile.mkdtemp()
        lowercase = 8
        # DPR tok
        lowercase = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        lowercase = os.path.join(self.tmpdirname ,'''dpr_tokenizer''')
        os.makedirs(__A ,exist_ok=__A)
        lowercase = os.path.join(__A ,DPR_VOCAB_FILES_NAMES['''vocab_file'''])
        with open(self.vocab_file ,'''w''' ,encoding='''utf-8''') as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))
        # BART tok
        lowercase = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''\u0120''',
            '''\u0120l''',
            '''\u0120n''',
            '''\u0120lo''',
            '''\u0120low''',
            '''er''',
            '''\u0120lowest''',
            '''\u0120newer''',
            '''\u0120wider''',
            '''<unk>''',
        ]
        lowercase = dict(zip(__A ,range(len(__A))))
        lowercase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        lowercase = {'''unk_token''': '''<unk>'''}
        lowercase = os.path.join(self.tmpdirname ,'''bart_tokenizer''')
        os.makedirs(__A ,exist_ok=__A)
        lowercase = os.path.join(__A ,BART_VOCAB_FILES_NAMES['''vocab_file'''])
        lowercase = os.path.join(__A ,BART_VOCAB_FILES_NAMES['''merges_file'''])
        with open(self.vocab_file ,'''w''' ,encoding='''utf-8''') as fp:
            fp.write(json.dumps(__A) + '''\n''')
        with open(self.merges_file ,'''w''' ,encoding='''utf-8''') as fp:
            fp.write('''\n'''.join(__A))

    # get_dpr_tokenizer: load the DPR question-encoder tokenizer written above.
    def A__ ( self):
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname ,'''dpr_tokenizer'''))

    # get_bart_tokenizer: load the BART tokenizer written above.
    def A__ ( self):
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname ,'''bart_tokenizer'''))

    # tearDown: drop the temporary directory.
    def A__ ( self):
        shutil.rmtree(self.tmpdirname)

    # Save a RagTokenizer + RagConfig, reload, and check both sub-tokenizers
    # round-trip (types and vocabularies preserved).
    @require_tokenizers
    def A__ ( self):
        lowercase = os.path.join(self.tmpdirname ,'''rag_tokenizer''')
        lowercase = RagConfig(question_encoder=DPRConfig().to_dict() ,generator=BartConfig().to_dict())
        lowercase = RagTokenizer(question_encoder=self.get_dpr_tokenizer() ,generator=self.get_bart_tokenizer())
        rag_config.save_pretrained(__A)
        rag_tokenizer.save_pretrained(__A)
        lowercase = RagTokenizer.from_pretrained(__A ,config=__A)
        self.assertIsInstance(new_rag_tokenizer.question_encoder ,__A)
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() ,rag_tokenizer.question_encoder.get_vocab())
        self.assertIsInstance(new_rag_tokenizer.generator ,__A)
        self.assertEqual(new_rag_tokenizer.generator.get_vocab() ,rag_tokenizer.generator.get_vocab())

    # Smoke test: the published rag-token-nq tokenizer tokenizes a question batch.
    @slow
    def A__ ( self):
        lowercase = RagTokenizer.from_pretrained('''facebook/rag-token-nq''')
        lowercase = [
            '''who got the first nobel prize in physics''',
            '''when is the next deadpool movie being released''',
            '''which mode is used for short wave broadcast service''',
            '''who is the owner of reading football club''',
            '''when is the next scandal episode coming out''',
            '''when is the last time the philadelphia won the superbowl''',
            '''what is the most current adobe flash player version''',
            '''how many episodes are there in dragon ball z''',
            '''what is the first step in the evolution of the eye''',
            '''where is gall bladder situated in human body''',
            '''what is the main mineral in lithium batteries''',
            '''who is the president of usa right now''',
            '''where do the greasers live in the outsiders''',
            '''panda is a national animal of which country''',
            '''what is the name of manchester united stadium''',
        ]
        lowercase = tokenizer(__A)
        self.assertIsNotNone(__A)

    # Same smoke test for the rag-sequence-nq checkpoint.
    @slow
    def A__ ( self):
        lowercase = RagTokenizer.from_pretrained('''facebook/rag-sequence-nq''')
        lowercase = [
            '''who got the first nobel prize in physics''',
            '''when is the next deadpool movie being released''',
            '''which mode is used for short wave broadcast service''',
            '''who is the owner of reading football club''',
            '''when is the next scandal episode coming out''',
            '''when is the last time the philadelphia won the superbowl''',
            '''what is the most current adobe flash player version''',
            '''how many episodes are there in dragon ball z''',
            '''what is the first step in the evolution of the eye''',
            '''where is gall bladder situated in human body''',
            '''what is the main mineral in lithium batteries''',
            '''who is the president of usa right now''',
            '''where do the greasers live in the outsiders''',
            '''panda is a national animal of which country''',
            '''what is the name of manchester united stadium''',
        ]
        lowercase = tokenizer(__A)
        self.assertIsNotNone(__A)
| 715 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowercase ( SCREAMING_SNAKE_CASE__ ):
    """Seq2Seq trainer specialization that post-processes generated predictions
    before computing metrics (examples-style evaluate/predict).

    NOTE(review): mangled — the base class ``SCREAMING_SNAKE_CASE__`` is
    undefined; the two public methods below repeat the parameter name ``A__``
    (a SyntaxError as written); and locals are bound to the throwaway name
    ``lowercase`` while later lines read the intended names (``gen_kwargs``,
    ``eval_dataset``, ``output``, ``metrics`` …).  Code left unchanged;
    comments only.
    """

    # Intended: (self, *args, eval_examples=None, post_process_function=None, **kwargs)
    def __init__( self ,*A__ ,A__=None ,A__=None ,**A__):
        super().__init__(*A__ ,**A__)
        lowercase = eval_examples
        lowercase = post_process_function

    # evaluate(): run the generation-based evaluation loop with metric
    # computation disabled, then post-process predictions and compute metrics.
    def A__ ( self ,A__ = None ,A__=None ,A__ = None ,A__ = "eval" ,**A__ ,):
        lowercase = gen_kwargs.copy()
        # Fall back to the trainer's configured generation length/beam count.
        lowercase = (
            gen_kwargs['''max_length'''] if gen_kwargs.get('''max_length''') is not None else self.args.generation_max_length
        )
        lowercase = (
            gen_kwargs['''num_beams'''] if gen_kwargs.get('''num_beams''') is not None else self.args.generation_num_beams
        )
        lowercase = gen_kwargs
        lowercase = self.eval_dataset if eval_dataset is None else eval_dataset
        lowercase = self.get_eval_dataloader(A__)
        lowercase = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        lowercase = self.compute_metrics
        lowercase = None
        lowercase = time.time()
        lowercase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            lowercase = eval_loop(
                A__ ,description='''Evaluation''' ,prediction_loss_only=True if compute_metrics is None else None ,ignore_keys=A__ ,metric_key_prefix=A__ ,)
        finally:
            # Always restore the metric function, even if the loop raised.
            lowercase = compute_metrics
        lowercase = self.args.eval_batch_size * self.args.world_size
        if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
            start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
        output.metrics.update(
            speed_metrics(
                A__ ,A__ ,num_samples=output.num_samples ,num_steps=math.ceil(output.num_samples / total_batch_size) ,))
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            lowercase = self.post_process_function(A__ ,A__ ,A__)
            lowercase = self.compute_metrics(A__)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f'{metric_key_prefix}_'):
                    lowercase = metrics.pop(A__)
            metrics.update(output.metrics)
        else:
            lowercase = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(A__)
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        lowercase = self.callback_handler.on_evaluate(self.args ,self.state ,self.control ,A__)
        return metrics

    # predict(): same flow against a test dataset; returns a PredictionOutput.
    def A__ ( self ,A__ ,A__ ,A__=None ,A__ = "test" ,**A__):
        lowercase = gen_kwargs.copy()
        lowercase = self.get_test_dataloader(A__)
        # Temporarily disable metric computation, we will do it in the loop here.
        lowercase = self.compute_metrics
        lowercase = None
        lowercase = time.time()
        lowercase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            lowercase = eval_loop(
                A__ ,description='''Prediction''' ,prediction_loss_only=True if compute_metrics is None else None ,ignore_keys=A__ ,metric_key_prefix=A__ ,)
        finally:
            lowercase = compute_metrics
        lowercase = self.args.eval_batch_size * self.args.world_size
        if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
            start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
        output.metrics.update(
            speed_metrics(
                A__ ,A__ ,num_samples=output.num_samples ,num_steps=math.ceil(output.num_samples / total_batch_size) ,))
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        lowercase = self.post_process_function(A__ ,A__ ,A__ ,'''predict''')
        lowercase = self.compute_metrics(A__)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f'{metric_key_prefix}_'):
                lowercase = metrics.pop(A__)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions ,label_ids=predictions.label_ids ,metrics=A__)
| 633 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger — presumably originally named ``logger``; the mangled name
# ``lowercase__`` is not referenced anywhere visible, so confirm before renaming.
lowercase__ :Optional[Any] = logging.get_logger(__name__)
# Map of pretrained checkpoint name -> remote config URL (transformers archive map).
lowercase__ :Any = {
    '''microsoft/wavlm-base''': '''https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json''',
    # See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class lowercase ( __UpperCAmelCase ):
    """Configuration class for WavLM models (transformers-style PretrainedConfig).

    NOTE(review): mangled — the base class ``__UpperCAmelCase`` is undefined;
    ``__init__`` repeats the parameter name ``A__`` dozens of times, which is a
    SyntaxError as written; all assignments are bound to the throwaway name
    ``lowercase`` while the intended attribute names appear on the right-hand
    sides and in ``self.*`` reads below.  Reconstructing the ~60 parameter
    names requires the upstream WavLMConfig signature — left unchanged and
    flagged; comments only.
    """

    # Intended: model_type = "wavlm"
    lowercase_ : List[Any] ='''wavlm'''

    # Intended parameters (in order, per the body): vocab_size, hidden_size,
    # num_hidden_layers, num_attention_heads, intermediate_size, hidden_act,
    # dropout rates, initializer_range, layer_norm_eps, feature-extractor
    # convolution settings, SpecAugment masks, codevector quantization, CTC
    # loss options, adapter and XVector settings, and special token ids.
    def __init__( self ,A__=3_2 ,A__=7_6_8 ,A__=1_2 ,A__=1_2 ,A__=3_0_7_2 ,A__="gelu" ,A__=0.1 ,A__=0.1 ,A__=0.1 ,A__=0.0 ,A__=0.1 ,A__=0.1 ,A__=0.02 ,A__=1E-5 ,A__="group" ,A__="gelu" ,A__=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) ,A__=(5, 2, 2, 2, 2, 2, 2) ,A__=(1_0, 3, 3, 3, 3, 2, 2) ,A__=False ,A__=1_2_8 ,A__=1_6 ,A__=3_2_0 ,A__=8_0_0 ,A__=False ,A__=True ,A__=0.05 ,A__=1_0 ,A__=2 ,A__=0.0 ,A__=1_0 ,A__=3_2_0 ,A__=2 ,A__=0.1 ,A__=1_0_0 ,A__=2_5_6 ,A__=2_5_6 ,A__=0.1 ,A__="mean" ,A__=False ,A__=False ,A__=2_5_6 ,A__=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) ,A__=(5, 3, 3, 1, 1) ,A__=(1, 2, 3, 1, 1) ,A__=5_1_2 ,A__=8_0 ,A__=0 ,A__=1 ,A__=2 ,A__=False ,A__=3 ,A__=2 ,A__=3 ,A__=None ,**A__ ,):
        super().__init__(**__SCREAMING_SNAKE_CASE ,pad_token_id=__SCREAMING_SNAKE_CASE ,bos_token_id=__SCREAMING_SNAKE_CASE ,eos_token_id=__SCREAMING_SNAKE_CASE)
        lowercase = hidden_size
        lowercase = feat_extract_norm
        lowercase = feat_extract_activation
        lowercase = list(__SCREAMING_SNAKE_CASE)
        lowercase = list(__SCREAMING_SNAKE_CASE)
        lowercase = list(__SCREAMING_SNAKE_CASE)
        lowercase = conv_bias
        lowercase = num_buckets
        lowercase = max_bucket_distance
        lowercase = num_conv_pos_embeddings
        lowercase = num_conv_pos_embedding_groups
        lowercase = len(self.conv_dim)
        lowercase = num_hidden_layers
        lowercase = intermediate_size
        lowercase = hidden_act
        lowercase = num_attention_heads
        lowercase = hidden_dropout
        lowercase = attention_dropout
        lowercase = activation_dropout
        lowercase = feat_proj_dropout
        lowercase = final_dropout
        lowercase = layerdrop
        lowercase = layer_norm_eps
        lowercase = initializer_range
        lowercase = num_ctc_classes
        lowercase = vocab_size
        lowercase = do_stable_layer_norm
        lowercase = use_weighted_layer_sum
        lowercase = classifier_proj_size
        # Validate that the three conv layer spec lists agree in length.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
                ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
                f' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'
                f' `len(config.conv_kernel) = {len(self.conv_kernel)}`.')
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        lowercase = apply_spec_augment
        lowercase = mask_time_prob
        lowercase = mask_time_length
        lowercase = mask_time_min_masks
        lowercase = mask_feature_prob
        lowercase = mask_feature_length
        # parameters for pretraining with codevector quantized representations
        lowercase = num_codevectors_per_group
        lowercase = num_codevector_groups
        lowercase = contrastive_logits_temperature
        lowercase = num_negatives
        lowercase = codevector_dim
        lowercase = proj_codevector_dim
        lowercase = diversity_loss_weight
        # ctc loss
        lowercase = ctc_loss_reduction
        lowercase = ctc_zero_infinity
        # adapter
        lowercase = add_adapter
        lowercase = adapter_kernel_size
        lowercase = adapter_stride
        lowercase = num_adapter_layers
        lowercase = output_hidden_size or hidden_size
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        lowercase = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        lowercase = list(__SCREAMING_SNAKE_CASE)
        lowercase = list(__SCREAMING_SNAKE_CASE)
        lowercase = list(__SCREAMING_SNAKE_CASE)
        lowercase = xvector_output_dim

    # Total downsampling stride of the convolutional feature extractor.
    @property
    def A__ ( self):
        return functools.reduce(operator.mul ,self.conv_stride ,1)
| 716 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
lowercase__ :Any = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def UpperCamelCase ( subparsers=None ):
    '''Build the argparse parser for the `accelerate tpu-config` command.

    When *subparsers* is given, the command is registered on it; otherwise a
    standalone parser is created.  Restored the names the body references
    (``subparsers``, ``parser``, ``config_args``, ``pod_args``); the mangled
    ``type=``/``default=`` placeholders are restored to ``str``/``None``.
    '''
    if subparsers is not None:
        parser = subparsers.add_parser('''tpu-config''' , description=_description)
    else:
        parser = argparse.ArgumentParser('''Accelerate tpu-config command''' , description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        '''Config Arguments''' , '''Arguments that can be configured through `accelerate config`.''')
    config_args.add_argument(
        '''--config_file''' , type=str , default=None , help='''Path to the config file to use for accelerate.''' , )
    config_args.add_argument(
        '''--tpu_name''' , default=None , help='''The name of the TPU to use. If not specified, will use the TPU specified in the config file.''' , )
    config_args.add_argument(
        '''--tpu_zone''' , default=None , help='''The zone of the TPU to use. If not specified, will use the zone specified in the config file.''' , )
    pod_args = parser.add_argument_group('''TPU Arguments''' , '''Arguments for options ran inside the TPU.''')
    pod_args.add_argument(
        '''--use_alpha''' , action='''store_true''' , help='''Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.''' , )
    pod_args.add_argument(
        '''--command_file''' , default=None , help='''The path to the file containing the commands to run on the pod on startup.''' , )
    pod_args.add_argument(
        '''--command''' , action='''append''' , nargs='''+''' , help='''A command to run on the pod. Can be passed multiple times.''' , )
    pod_args.add_argument(
        '''--install_accelerate''' , action='''store_true''' , help='''Whether to install accelerate on the pod. Defaults to False.''' , )
    pod_args.add_argument(
        '''--accelerate_version''' , default='''latest''' , help='''The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.''' , )
    pod_args.add_argument(
        '''--debug''' , action='''store_true''' , help='''If set, will print the command that would be run instead of running it.''')
    if subparsers is not None:
        # presumably the default `func` is the launcher defined below — the
        # mangled source passed the parameter placeholder here; TODO confirm.
        parser.set_defaults(func=tpu_command_launcher)
    return parser
def UpperCamelCase ( args ):
    '''Assemble and run the `gcloud compute tpus tpu-vm ssh` command for *args*.

    Fills unset CLI options from the accelerate config file, builds the remote
    command string, and executes it on every pod worker (or just prints it with
    --debug).  Restored the ``args``/``defaults``/``new_cmd``/``cmd`` names the
    body references and re-nested the defaults block under its guarding `if`
    (flattened in the mangled source, which would crash when no config exists).
    '''
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = '''git+https://github.com/huggingface/accelerate.git'''
    elif args.accelerate_version == "latest":
        args.accelerate_version = '''accelerate -U'''
    elif isinstance(parse(args.accelerate_version) , Version):
        args.accelerate_version = f'accelerate=={args.accelerate_version}'
    if not args.command_file and not args.command:
        raise ValueError('''You must specify either a command file or a command to run on the pod.''')
    if args.command_file:
        with open(args.command_file , '''r''') as f:
            args.command = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0] , list):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ['''cd /usr/share''']
    if args.install_accelerate:
        new_cmd += [f'pip install {args.accelerate_version}']
    new_cmd += args.command
    args.command = '''; '''.join(new_cmd)
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ['''gcloud''']
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f'Running {" ".join(cmd)}')
        return
    subprocess.run(cmd)
    print('''Successfully setup pod.''')
def UpperCamelCase ( ):
    '''Standalone entry point: parse CLI arguments and run the TPU setup command.

    Restored the ``parser``/``args`` locals (the mangled source bound both to a
    throwaway name, so ``parser.parse_args()`` raised NameError).
    '''
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
| 633 | 0 |
import re
def UpperCamelCase ( dna ):
    '''Return the complement strand of the DNA string *dna* (A<->T, C<->G).

    Raises ValueError when *dna* contains any character outside A/T/C/G.
    Restored the parameter name ``dna``, which the return line references (the
    mangled source raised NameError on every call).
    '''
    if len(re.findall('''[ATCG]''' , dna)) != len(dna):
        raise ValueError('''Invalid Strand''')
    return dna.translate(dna.maketrans('''ATCG''' , '''TAGC'''))
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 717 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
# Pattern matching any character that is NOT identifier-like; used to split
# code into tokens.  Restored the constant names referenced throughout the
# deduplication helpers below (mangled to `lowercase__`).
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def UpperCamelCase ( tokens ):
    '''Compute a MinHash signature over a collection of code *tokens*.

    Returns None for snippets with fewer than MIN_NUM_TOKENS tokens.  Restored
    the ``tokens``/``min_hash`` names; the mangled source passed the token list
    itself as ``num_perm`` — the constant NUM_PERM is clearly intended.
    '''
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash
def UpperCamelCase ( lowerCAmelCase__ ):
    """Split a code string on non-identifier characters and return the set of
    non-empty tokens."""
    pieces = NON_ALPHA.split(lowerCAmelCase__)
    return {piece for piece in pieces if piece.strip()}
class lowercase :
    """MinHash-LSH index that incrementally groups near-duplicate code files.

    Restored from the mangled source: the ``self`` attribute assignments (bound
    to a throwaway local), the keyword-only parameter name the body reads,
    ``add``'s parameter names (duplicated — a SyntaxError as written), the
    ``defaultdict(set)`` value factory (cluster values use ``.add``), and the
    method names callers use (``di.add`` / ``di.get_duplicate_clusters``).
    """

    def __init__( self ,* ,duplication_jaccard_threshold = 0.85 ,):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold ,num_perm=self._num_perm)
        # cluster base element -> set of near-duplicate elements
        self._duplicate_clusters = defaultdict(set)

    def add(self ,code_key ,min_hash):
        """Index (code_key, min_hash); attach to an existing cluster of near
        duplicates, or start a new one rooted at the first близко match."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f'Duplicate key {code_key}')
            return
        self._index.insert(code_key ,min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                # No existing cluster: root a new one at the first близко duplicate.
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self):
        """Return clusters as lists of {"base_index", "repo_name", "path"} dicts."""
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{'''base_index''': el[0], '''repo_name''': el[1], '''path''': el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self ,filepath):
        """Dump the clusters to *filepath* as JSON."""
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath ,'''w''') as f:
            json.dump(duplicate_clusters ,f)
def UpperCamelCase ( element ):
    '''Pool worker: compute the MinHash of one dataset row.

    *element* is an ``(index, row_dict)`` pair.  Returns
    ``((index, repo_name, path), min_hash)`` or None for too-short snippets.
    Restored the tuple unpacking and names the body references.
    '''
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data['''content''']) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def UpperCamelCase ( dataset_iterator ):
    '''Yield ((index, repo_name, path), min_hash) pairs for every row of
    *dataset_iterator*, computing hashes in a multiprocessing pool.

    Restored the parameter name fed to ThreadedIterator (mangled).
    '''
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash , ThreadedIterator(dataset_iterator , max_queue_size=1_0000) , chunksize=100 , ):
            if data is not None:
                yield data
def UpperCamelCase ( dataset_iterator , jaccard_threshold ):
    '''Cluster near-duplicate files of *dataset_iterator* using MinHash + LSH.

    Returns a list of clusters; each cluster is a list of
    {"base_index", "repo_name", "path"} dicts.  The mangled signature repeated
    one parameter name (a SyntaxError as written); restored it plus the
    ``di``/``filename``/``min_hash`` names the body binds.
    '''
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)) , max_queue_size=100)):
        di.add(filename , min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def UpperCamelCase ( code1 , code2 ):
    '''Jaccard similarity between the token sets of two code strings.

    The mangled signature repeated one parameter name (a SyntaxError as
    written) and reused one local for both token sets; restored.
    NOTE(review): raises ZeroDivisionError when both token sets are empty —
    presumably callers never compare two tokenless snippets; confirm before
    hardening.
    '''
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
lowercase__ :List[Any] = None
def UpperCamelCase ( cluster , jaccard_threshold ):
    '''Reduce one duplicate *cluster* to its "extreme" representatives.

    An element joins ``extremes`` only if it is not within *jaccard_threshold*
    of an already-kept extreme; otherwise the matching extreme's "copies" count
    is incremented.  Restored the names the body references (the mangled
    signature repeated a parameter name — a SyntaxError as written — and reused
    one local for both elements being compared).
    '''
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1['''base_index''']]['''content''']
        for element2 in extremes:
            code2 = _shared_dataset[element2['''base_index''']]['''content''']
            if jaccard_similarity(code1 , code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes
def UpperCamelCase ( cluster_list , dataset , jaccard_threshold ):
    '''Compute the extreme representatives of every cluster in *cluster_list*.

    Publishes *dataset* through the module-global ``_shared_dataset`` so pool
    workers can read file contents.  The mangled signature repeated parameter
    names (a SyntaxError as written); restored them (order matches the call
    site below) and the ``extremes_list``/``f`` locals.
    '''
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared , jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f , cluster_list , ) , total=len(cluster_list) , ):
            extremes_list.append(extremes)
    return extremes_list
def UpperCamelCase ( dataset , jaccard_threshold = 0.85 ):
    '''Deduplicate *dataset*: drop every near-duplicate that is not an extreme.

    Returns (filtered_dataset, duplicate_clusters), annotating each cluster
    element with "is_extreme" and, for extremes, its "copies" count.  The
    mangled signature repeated one parameter name (a SyntaxError as written);
    restored it plus all the locals the body references.
    '''
    duplicate_clusters = make_duplicate_clusters(dataset , jaccard_threshold)
    duplicate_indices = {x['''base_index'''] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters , dataset , jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element['''base_index''']] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x , idx: idx not in remove_indices , with_indices=True)
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element['''is_extreme'''] = element['''base_index'''] in extreme_dict
            if element["is_extreme"]:
                element['''copies'''] = extreme_dict[element['''base_index''']]['''copies''']
    print(f'Original dataset size: {len(dataset)}')
    print(f'Number of duplicate clusters: {len(duplicate_clusters)}')
    print(f'Files in duplicate cluster: {len(duplicate_indices)}')
    print(f'Unique files in duplicate cluster: {len(extreme_dict)}')
    print(f'Filtered dataset size: {len(ds_filter)}')
    return ds_filter, duplicate_clusters
| 633 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
# Lazy-import structure: submodule name -> list of public symbols.  Restored
# the name `_import_structure` (the _LazyModule call at the bottom references
# it), the mutation of it under the torch branch, and the standard
# `sys.modules[__name__] = _LazyModule(...)` replacement (the mangled source
# bound all three to throwaway names, breaking the lazy-module pattern).
_import_structure = {
    "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ernie"] = [
        "ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ErnieForCausalLM",
        "ErnieForMaskedLM",
        "ErnieForMultipleChoice",
        "ErnieForNextSentencePrediction",
        "ErnieForPreTraining",
        "ErnieForQuestionAnswering",
        "ErnieForSequenceClassification",
        "ErnieForTokenClassification",
        "ErnieModel",
        "ErniePreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ernie import (
            ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ErnieForCausalLM,
            ErnieForMaskedLM,
            ErnieForMultipleChoice,
            ErnieForNextSentencePrediction,
            ErnieForPreTraining,
            ErnieForQuestionAnswering,
            ErnieForSequenceClassification,
            ErnieForTokenClassification,
            ErnieModel,
            ErniePreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 718 |
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
# Module logger; restored the name `logger` referenced by the warnings below.
logger = logging.get_logger(__name__)
class lowercase ( PreTrainedModel ):
    """CLIP-based safety checker: flags NSFW and watermarked images and blacks
    them out.

    Restored from the mangled source: the base class (undefined placeholder →
    ``PreTrainedModel``, imported above), the ``config_class`` /
    ``_no_split_modules`` class attributes, the ``self.*`` submodule
    assignments (bound to a throwaway local), and the forward method's
    parameter names (duplicated — a SyntaxError as written).  The method is
    named ``forward`` per nn.Module convention; confirm against the original.
    """

    config_class = CLIPConfig
    _no_split_modules = ['''CLIPEncoderLayer''']

    def __init__( self ,config):
        super().__init__(config)
        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)
        # Two linear probes on the projected image embedding: NSFW and watermark.
        self.p_head = nn.Linear(config.vision_config.projection_dim ,1)
        self.w_head = nn.Linear(config.vision_config.projection_dim ,1)

    @torch.no_grad()
    def forward(self ,clip_input ,images ,p_threshold=0.5 ,w_threshold=0.5):
        """Return (images, nsfw_flags, watermark_flags); flagged images are zeroed."""
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()
        if any(nsfw_detected):
            logger.warning(
                '''Potential NSFW content was detected in one or more images. A black image will be returned instead.'''
                ''' Try again with a different prompt and/or seed.''')
        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()
        if any(watermark_detected):
            logger.warning(
                '''Potential watermarked content was detected in one or more images. A black image will be returned instead.'''
                ''' Try again with a different prompt and/or seed.''')
        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)
        return images, nsfw_detected, watermark_detected
| 633 | 0 |
'''simple docstring'''
import numpy
# List of input, output pairs
# Training examples as ((feature1, feature2, feature3), label) pairs.
# Restored the constant names every function below reads (the mangled source
# bound them all to the throwaway name `lowercase__`, so nothing ran).
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
# parameter_vector[0] is the bias term; the rest multiply the three features.
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def _error(example_no, data_set="train"):
    '''Signed error (prediction minus true label) for one example.

    Restored the function name the callers below use and distinct parameter
    names (the mangled signature repeated one name — a SyntaxError as written).
    '''
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)
def _hypothesis_value(data_input_tuple):
    '''Linear hypothesis: bias (parameter_vector[0]) + dot(features, parameter_vector[1:]).

    Restored the function name callers use; the mangled loop bound was an
    undefined placeholder — presumably ``len(parameter_vector) - 1`` (one
    weight per feature), since ``len(data_input_tuple) - 1`` would drop the
    last feature; confirm against the upstream algorithm.
    '''
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val
def output(example_no, data_set):
    '''True label of example *example_no* from "train" or "test"; None otherwise.

    Restored the function name callers use and distinct parameter names (the
    mangled signature repeated one name — a SyntaxError as written).
    '''
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None
def calculate_hypothesis_value(example_no, data_set):
    '''Hypothesis prediction for example *example_no* of the chosen data set.

    Restored the function name callers use and distinct parameter names (the
    mangled signature repeated one name — a SyntaxError as written).
    '''
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None
def summation_of_cost_derivative(index, end=m):
    '''Sum over the first *end* examples of error (times feature *index*,
    unless index == -1, which selects the bias term).

    Restored the function name callers use and distinct parameter names (the
    mangled signature repeated one name — a SyntaxError as written); the loop
    feeds the running example number ``i`` to ``_error``.
    '''
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value
def get_cost_derivative(index):
    '''Partial derivative of the mean-squared cost w.r.t. parameter *index*
    (-1 selects the bias), averaged over the m training examples.'''
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value
def UpperCamelCase ( ):
    """Run batch gradient descent until the parameter vector stops changing.

    Mutates the module-level ``parameter_vector`` in place and prints the
    number of iterations taken.
    """
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.00_00_02
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0 , len(parameter_vector ) ):
            # index -1 addresses the bias term inside the derivative helper.
            cost_derivative = get_cost_derivative(i - 1 )
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        # Stop once the update no longer moves the parameters beyond tolerance.
        if numpy.allclose(
            parameter_vector , temp_parameter_vector , atol=absolute_error_limit , rtol=relative_error_limit , ):
            break
        parameter_vector = temp_parameter_vector
    print(('''Number of iterations:''', j) )
def UpperCamelCase ( ):
    """Print actual vs. hypothesis output for every test example."""
    for i in range(len(test_data ) ):
        print(('''Actual output value:''', output(i , '''test''' )) )
        print(('''Hypothesis output:''', calculate_hypothesis_value(i , '''test''' )) )
if __name__ == "__main__":
    # NOTE(review): these two names were mangled away in this file — the
    # functions exist above but are all bound to `UpperCamelCase`; restore the
    # original names before running as a script.
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
| 719 |
class lowercase :
    """A binary-search-tree node holding a value and left/right child links."""

    def __init__( self , val):
        # NOTE(review): the original mangled body bound `val` to a throwaway
        # local three times; a node must store its value and child links.
        self.val = val
        self.left = None
        self.right = None

    def insert( self , val):
        """Insert ``val`` into the subtree rooted at this node, keeping BST order.

        A falsy stored value (None/0) is treated as an empty node and replaced;
        the original method was mangled to the name ``A__`` although both this
        class and the sort helper below call it as ``insert``.
        """
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = lowercase(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = lowercase(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val
def UpperCamelCase ( root , res ):
    """Append the values of the subtree at ``root`` to ``res`` in sorted order."""
    # Recursive inorder traversal: left subtree, node, right subtree.
    # (The original recursed through the undefined name `inorder`; recurse
    # through this function's own name instead.)
    if root:
        UpperCamelCase(root.left , res )
        res.append(root.val )
        UpperCamelCase(root.right , res )
def UpperCamelCase ( arr ):
    """Sort ``arr`` by inserting it into a BST and reading it back inorder."""
    # Build BST
    if len(arr ) == 0:
        return arr
    root = lowercase(arr[0] )  # `lowercase` is the BST node class defined above
    for i in range(1 , len(arr ) ):
        root.insert(arr[i] )
    # Traverse BST in order.
    res = []
    # NOTE(review): the traversal helper above was mangled to the same name as
    # this function; it must be exposed as `inorder` for this call to resolve.
    inorder(root , res )
    return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 633 | 0 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
lowercase__ :Dict = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
lowercase__ :Optional[Any] = logging.getLogger()
def UpperCamelCase ( ):
    """Parse the ``-f`` flag (passed by notebook/test runners) and return its value."""
    parser = argparse.ArgumentParser()
    parser.add_argument('''-f''' )
    args = parser.parse_args()
    return args.f
def UpperCamelCase ( output_dir , split="eval" ):
    """Load ``<split>_results.json`` from ``output_dir`` and return it as a dict.

    :param output_dir: directory an example script wrote its results into
    :param split: results-file prefix, e.g. ``"eval"`` or ``"test"``
    :raises ValueError: when the results file does not exist
    """
    path = os.path.join(output_dir , f'{split}_results.json' )
    if os.path.exists(path ):
        with open(path , '''r''' ) as f:
            return json.load(f )
    raise ValueError(f'can\'t find {path}' )
lowercase__ :str = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowercase ( SCREAMING_SNAKE_CASE__ ):
	"""End-to-end smoke tests for the Flax example scripts (GLUE, CLM, summarization,
	MLM, T5-MLM, NER, QA): each runs a tiny training job and checks a metric floor."""
	# NOTE(review): identifiers in this file look machine-mangled — the base class
	# `SCREAMING_SNAKE_CASE__` (presumably TestCasePlus) and the `A__` tokens used
	# both as method names and as patch.object/get_results arguments are undefined
	# here; confirm against the original transformers example tests. Code unchanged.
	def A__ ( self):
		# GLUE text classification: short run, then eval accuracy >= 0.75.
		lowercase = self.get_auto_remove_tmp_dir()
		lowercase = f'\n    run_glue.py\n    --model_name_or_path distilbert-base-uncased\n    --output_dir {tmp_dir}\n    --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n    --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n    --per_device_train_batch_size=2\n    --per_device_eval_batch_size=1\n    --learning_rate=1e-4\n    --eval_steps=2\n    --warmup_steps=2\n    --seed=42\n    --max_seq_length=128\n    '.split()
		with patch.object(A__ ,'''argv''' ,A__):
			run_flax_glue.main()
			lowercase = get_results(A__)
			self.assertGreaterEqual(result['''eval_accuracy'''] ,0.75)
	@slow
	def A__ ( self):
		# Causal LM (distilgpt2): eval perplexity must stay below 100.
		lowercase = self.get_auto_remove_tmp_dir()
		lowercase = f'\n    run_clm_flax.py\n    --model_name_or_path distilgpt2\n    --train_file ./tests/fixtures/sample_text.txt\n    --validation_file ./tests/fixtures/sample_text.txt\n    --do_train\n    --do_eval\n    --block_size 128\n    --per_device_train_batch_size 4\n    --per_device_eval_batch_size 4\n    --num_train_epochs 2\n    --logging_steps 2 --eval_steps 2\n    --output_dir {tmp_dir}\n    --overwrite_output_dir\n    '.split()
		with patch.object(A__ ,'''argv''' ,A__):
			run_clm_flax.main()
			lowercase = get_results(A__)
			self.assertLess(result['''eval_perplexity'''] ,1_0_0)
	@slow
	def A__ ( self):
		# Summarization (t5-small): ROUGE floors on the "test" split results.
		lowercase = self.get_auto_remove_tmp_dir()
		lowercase = f'\n    run_summarization.py\n    --model_name_or_path t5-small\n    --train_file tests/fixtures/tests_samples/xsum/sample.json\n    --validation_file tests/fixtures/tests_samples/xsum/sample.json\n    --test_file tests/fixtures/tests_samples/xsum/sample.json\n    --output_dir {tmp_dir}\n    --overwrite_output_dir\n    --num_train_epochs=3\n    --warmup_steps=8\n    --do_train\n    --do_eval\n    --do_predict\n    --learning_rate=2e-4\n    --per_device_train_batch_size=2\n    --per_device_eval_batch_size=1\n    --predict_with_generate\n    '.split()
		with patch.object(A__ ,'''argv''' ,A__):
			run_summarization_flax.main()
			lowercase = get_results(A__ ,split='''test''')
			self.assertGreaterEqual(result['''test_rouge1'''] ,1_0)
			self.assertGreaterEqual(result['''test_rouge2'''] ,2)
			self.assertGreaterEqual(result['''test_rougeL'''] ,7)
			self.assertGreaterEqual(result['''test_rougeLsum'''] ,7)
	@slow
	def A__ ( self):
		# Masked LM (distilroberta-base): eval perplexity must stay below 42.
		lowercase = self.get_auto_remove_tmp_dir()
		lowercase = f'\n    run_mlm.py\n    --model_name_or_path distilroberta-base\n    --train_file ./tests/fixtures/sample_text.txt\n    --validation_file ./tests/fixtures/sample_text.txt\n    --output_dir {tmp_dir}\n    --overwrite_output_dir\n    --max_seq_length 128\n    --per_device_train_batch_size 4\n    --per_device_eval_batch_size 4\n    --logging_steps 2 --eval_steps 2\n    --do_train\n    --do_eval\n    --num_train_epochs=1\n    '.split()
		with patch.object(A__ ,'''argv''' ,A__):
			run_mlm_flax.main()
			lowercase = get_results(A__)
			self.assertLess(result['''eval_perplexity'''] ,4_2)
	@slow
	def A__ ( self):
		# T5 span-corruption MLM: eval accuracy >= 0.42.
		lowercase = self.get_auto_remove_tmp_dir()
		lowercase = f'\n    run_t5_mlm_flax.py\n    --model_name_or_path t5-small\n    --train_file ./tests/fixtures/sample_text.txt\n    --validation_file ./tests/fixtures/sample_text.txt\n    --do_train\n    --do_eval\n    --max_seq_length 128\n    --per_device_train_batch_size 4\n    --per_device_eval_batch_size 4\n    --num_train_epochs 2\n    --logging_steps 2 --eval_steps 2\n    --output_dir {tmp_dir}\n    --overwrite_output_dir\n    '.split()
		with patch.object(A__ ,'''argv''' ,A__):
			run_ta_mlm_flax.main()
			lowercase = get_results(A__)
			self.assertGreaterEqual(result['''eval_accuracy'''] ,0.42)
	@slow
	def A__ ( self):
		# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
		lowercase = 7 if get_gpu_count() > 1 else 2
		lowercase = self.get_auto_remove_tmp_dir()
		lowercase = f'\n    run_flax_ner.py\n    --model_name_or_path bert-base-uncased\n    --train_file tests/fixtures/tests_samples/conll/sample.json\n    --validation_file tests/fixtures/tests_samples/conll/sample.json\n    --output_dir {tmp_dir}\n    --overwrite_output_dir\n    --do_train\n    --do_eval\n    --warmup_steps=2\n    --learning_rate=2e-4\n    --logging_steps 2 --eval_steps 2\n    --per_device_train_batch_size=2\n    --per_device_eval_batch_size=2\n    --num_train_epochs={epochs}\n    --seed 7\n    '.split()
		with patch.object(A__ ,'''argv''' ,A__):
			run_flax_ner.main()
			lowercase = get_results(A__)
			self.assertGreaterEqual(result['''eval_accuracy'''] ,0.75)
			self.assertGreaterEqual(result['''eval_f1'''] ,0.3)
	@slow
	def A__ ( self):
		# SQuAD v2 question answering: F1 and exact-match floors of 30.
		lowercase = self.get_auto_remove_tmp_dir()
		lowercase = f'\n    run_qa.py\n    --model_name_or_path bert-base-uncased\n    --version_2_with_negative\n    --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n    --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n    --output_dir {tmp_dir}\n    --overwrite_output_dir\n    --num_train_epochs=3\n    --warmup_steps=2\n    --do_train\n    --do_eval\n    --logging_steps 2 --eval_steps 2\n    --learning_rate=2e-4\n    --per_device_train_batch_size=2\n    --per_device_eval_batch_size=1\n    '.split()
		with patch.object(A__ ,'''argv''' ,A__):
			run_qa.main()
			lowercase = get_results(A__)
			self.assertGreaterEqual(result['''eval_f1'''] ,3_0)
			self.assertGreaterEqual(result['''eval_exact'''] ,3_0)
| 720 |
import os
def UpperCamelCase ( lowerCAmelCase__ = "input.txt" ):
    """Project Euler 82: minimal path sum from any cell in the left column to any
    cell in the right column, moving only right, up, or down.

    :param lowerCAmelCase__: path to a comma-separated matrix file
    :return: the minimal path sum
    """
    # NOTE(review): joining dirname(filename) with filename itself is unusual —
    # for a bare name it resolves relative to the CWD; confirm the original
    # resolved relative to the script file instead.
    with open(os.path.join(os.path.dirname(lowerCAmelCase__ ) , lowerCAmelCase__ ) ) as input_file:
        matrix = [
            [int(element ) for element in line.split(''',''' )]
            for line in input_file.readlines()
        ]
    rows = len(matrix )
    cols = len(matrix[0] )
    # minimal_path_sums[i][j] = cheapest way to reach cell (i, j) from column 0.
    minimal_path_sums = [[-1 for _ in range(cols )] for _ in range(rows )]
    for i in range(rows ):
        minimal_path_sums[i][0] = matrix[i][0]
    for j in range(1 , cols ):
        # Seed each cell with a pure rightward move from column j-1 ...
        for i in range(rows ):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        # ... then relax moving downward through the column ...
        for i in range(1 , rows ):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
        # ... and upward.
        for i in range(rows - 2 , -1 , -1 ):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(F'{solution() = }')
| 633 | 0 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase ( __SCREAMING_SNAKE_CASE ):
	"""Unit tests for diffusers' UnCLIPScheduler: config sweeps, variance values,
	and full denoising loops checked against reference sums/means."""
	# NOTE(review): identifiers look machine-mangled — the base class
	# `__SCREAMING_SNAKE_CASE` (presumably SchedulerCommonTest) and the `_a`
	# tokens used where locals/arguments belong are undefined here; confirm
	# against the original diffusers scheduler tests. Code below is unchanged.
	lowercase_ : Optional[int] =(UnCLIPScheduler,)
	def A__ ( self ,**A__):
		# Default scheduler config; keyword overrides are merged in before returning.
		lowercase = {
			'''num_train_timesteps''': 1_0_0_0,
			'''variance_type''': '''fixed_small_log''',
			'''clip_sample''': True,
			'''clip_sample_range''': 1.0,
			'''prediction_type''': '''epsilon''',
		}
		config.update(**_a)
		return config
	def A__ ( self):
		# Sweep over timestep counts.
		for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
			self.check_over_configs(num_train_timesteps=_a)
	def A__ ( self):
		# Sweep over both supported variance types.
		for variance in ["fixed_small_log", "learned_range"]:
			self.check_over_configs(variance_type=_a)
	def A__ ( self):
		for clip_sample in [True, False]:
			self.check_over_configs(clip_sample=_a)
	def A__ ( self):
		for clip_sample_range in [1, 5, 1_0, 2_0]:
			self.check_over_configs(clip_sample_range=_a)
	def A__ ( self):
		for prediction_type in ["epsilon", "sample"]:
			self.check_over_configs(prediction_type=_a)
	def A__ ( self):
		# Forward passes for (time_step, prev_timestep) pairs; prev must precede.
		for time_step in [0, 5_0_0, 9_9_9]:
			for prev_timestep in [None, 5, 1_0_0, 2_5_0, 5_0_0, 7_5_0]:
				if prev_timestep is not None and prev_timestep >= time_step:
					continue
				self.check_over_forward(time_step=_a ,prev_timestep=_a)
	def A__ ( self):
		# Spot-check the fixed_small_log variance schedule at three timesteps.
		lowercase = self.scheduler_classes[0]
		lowercase = self.get_scheduler_config(variance_type='''fixed_small_log''')
		lowercase = scheduler_class(**_a)
		assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0_000E-10)) < 1E-5
		assert torch.sum(torch.abs(scheduler._get_variance(4_8_7) - 0.0549625)) < 1E-5
		assert torch.sum(torch.abs(scheduler._get_variance(9_9_9) - 0.9994987)) < 1E-5
	def A__ ( self):
		# Spot-check the learned_range variance with a fixed predicted variance.
		lowercase = self.scheduler_classes[0]
		lowercase = self.get_scheduler_config(variance_type='''learned_range''')
		lowercase = scheduler_class(**_a)
		lowercase = 0.5
		assert scheduler._get_variance(1 ,predicted_variance=_a) - -10.1712790 < 1E-5
		assert scheduler._get_variance(4_8_7 ,predicted_variance=_a) - -5.7998052 < 1E-5
		assert scheduler._get_variance(9_9_9 ,predicted_variance=_a) - -0.0010011 < 1E-5
	def A__ ( self):
		# Full denoising loop over the training timesteps with a seeded generator.
		lowercase = self.scheduler_classes[0]
		lowercase = self.get_scheduler_config()
		lowercase = scheduler_class(**_a)
		lowercase = scheduler.timesteps
		lowercase = self.dummy_model()
		lowercase = self.dummy_sample_deter
		lowercase = torch.manual_seed(0)
		for i, t in enumerate(_a):
			# 1. predict noise residual
			lowercase = model(_a ,_a)
			# 2. predict previous mean of sample x_t-1
			lowercase = scheduler.step(_a ,_a ,_a ,generator=_a).prev_sample
			lowercase = pred_prev_sample
		lowercase = torch.sum(torch.abs(_a))
		lowercase = torch.mean(torch.abs(_a))
		assert abs(result_sum.item() - 252.2682495) < 1E-2
		assert abs(result_mean.item() - 0.3284743) < 1E-3
	def A__ ( self):
		# Same loop with a reduced 25-step inference schedule and explicit prev steps.
		lowercase = self.scheduler_classes[0]
		lowercase = self.get_scheduler_config()
		lowercase = scheduler_class(**_a)
		scheduler.set_timesteps(2_5)
		lowercase = scheduler.timesteps
		lowercase = self.dummy_model()
		lowercase = self.dummy_sample_deter
		lowercase = torch.manual_seed(0)
		for i, t in enumerate(_a):
			# 1. predict noise residual
			lowercase = model(_a ,_a)
			if i + 1 == timesteps.shape[0]:
				lowercase = None
			else:
				lowercase = timesteps[i + 1]
			# 2. predict previous mean of sample x_t-1
			lowercase = scheduler.step(
				_a ,_a ,_a ,prev_timestep=_a ,generator=_a).prev_sample
			lowercase = pred_prev_sample
		lowercase = torch.sum(torch.abs(_a))
		lowercase = torch.mean(torch.abs(_a))
		assert abs(result_sum.item() - 258.2044983) < 1E-2
		assert abs(result_mean.item() - 0.3362038) < 1E-3
	def A__ ( self):
		# Not applicable for UnCLIP in the original suite.
		pass
	def A__ ( self):
		pass
| 721 |
from __future__ import annotations
def UpperCamelCase ( nums ):
    """Return True when the side lengths ``nums`` can form a valid polygon.

    A polygon is valid iff its longest side is strictly shorter than the sum
    of all the other sides.

    :raises ValueError: for fewer than 2 sides or any non-positive length
    """
    if len(nums ) < 2:
        raise ValueError('''Monogons and Digons are not polygons in the Euclidean space''' )
    if any(i <= 0 for i in nums ):
        raise ValueError('''All values must be greater than 0''' )
    # Sort a copy so the caller's list is left untouched.
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 633 | 0 |
def UpperCamelCase ( column_title ):
    """Convert an Excel column title (e.g. "A", "AB") to its column number.

    The title is read as a bijective base-26 numeral where "A" == 1.
    """
    # NOTE(review): `assert` is kept for input validation to preserve the
    # original contract (AssertionError on non-uppercase input), though a
    # ValueError would normally be preferable.
    assert column_title.isupper()
    answer = 0
    index = len(column_title ) - 1
    power = 0
    # Walk the title right-to-left, adding digit * 26**position.
    while index >= 0:
        value = (ord(column_title[index] ) - 64) * pow(26 , power )
        answer += value
        power += 1
        index -= 1
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 700 |
def UpperCamelCase ( input_a , input_b ):
    """AND gate: return 1 only when neither input is 0, else 0."""
    # The tuple counts zeros; the gate is high iff no input is 0.
    return int((input_a, input_b).count(0 ) == 0 )
def UpperCamelCase ( ):
    """Exercise the AND gate's full two-input truth table."""
    # NOTE(review): `and_gate` is not defined in this module — the gate above was
    # mangled to `UpperCamelCase`, and the `test_and_gate` call below has the same
    # problem; these raise NameError until the original names are restored.
    assert and_gate(0 , 0 ) == 0
    assert and_gate(0 , 1 ) == 0
    assert and_gate(1 , 0 ) == 0
    assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
| 633 | 0 |
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
lowercase__ :Any = get_tests_dir("fixtures/test_sentencepiece.model")
lowercase__ :Union[str, Any] = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
lowercase__ :Any = """pt""" if is_torch_available() else """tf"""
@require_sentencepiece
@require_tokenizers
class lowercase ( snake_case_ , unittest.TestCase ):
	"""Tokenizer tests for Camembert: vocab fixtures, slow/fast parity, and a
	pinned integration encoding for French sentences."""
	# NOTE(review): `snake_case_` (presumably TokenizerTesterMixin) and the `A__`
	# tokens used as method names/arguments are machine-mangled, undefined names;
	# confirm against the original transformers Camembert tokenizer tests.
	lowercase_ : Dict =CamembertTokenizer
	lowercase_ : Dict =CamembertTokenizerFast
	lowercase_ : Any =True
	lowercase_ : List[str] =True
	def A__ ( self):
		super().setUp()
		# We have a SentencePiece fixture for testing
		lowercase = CamembertTokenizer(A__)
		tokenizer.save_pretrained(self.tmpdirname)
	def A__ ( self):
		# <pad> must map to id 1 and back.
		lowercase = '''<pad>'''
		lowercase = 1
		self.assertEqual(self.get_tokenizer()._convert_token_to_id(A__) ,A__)
		self.assertEqual(self.get_tokenizer()._convert_id_to_token(A__) ,A__)
	def A__ ( self):
		# First/last vocab entries and total vocab-key count.
		lowercase = list(self.get_tokenizer().get_vocab().keys())
		self.assertEqual(vocab_keys[0] ,'''<s>NOTUSED''')
		self.assertEqual(vocab_keys[1] ,'''<pad>''')
		self.assertEqual(vocab_keys[-1] ,'''<mask>''')
		self.assertEqual(len(A__) ,1_0_0_4)
	def A__ ( self):
		self.assertEqual(self.get_tokenizer().vocab_size ,1_0_0_5)
	def A__ ( self):
		# Slow vs fast tokenizer must agree on ids (tokens differ only on <unk>).
		lowercase = CamembertTokenizer(A__)
		tokenizer.save_pretrained(self.tmpdirname)
		lowercase = CamembertTokenizerFast.from_pretrained(self.tmpdirname)
		lowercase = '''I was born in 92000, and this is falsé.'''
		lowercase = tokenizer.encode(A__)
		lowercase = rust_tokenizer.encode(A__)
		self.assertListEqual(A__ ,A__)
		lowercase = tokenizer.encode(A__ ,add_special_tokens=A__)
		lowercase = rust_tokenizer.encode(A__ ,add_special_tokens=A__)
		self.assertListEqual(A__ ,A__)
		# <unk> tokens are not the same for `rust` than for `slow`.
		# Because spm gives back raw token instead of `unk` in EncodeAsPieces
		# tokens = tokenizer.tokenize(sequence)
		lowercase = tokenizer.convert_ids_to_tokens(A__)
		lowercase = rust_tokenizer.tokenize(A__)
		self.assertListEqual(A__ ,A__)
	def A__ ( self):
		# Slow vs fast parity on tokenize/encode, with and without special tokens.
		if not self.test_rust_tokenizer:
			return
		lowercase = self.get_tokenizer()
		lowercase = self.get_rust_tokenizer()
		lowercase = '''I was born in 92000, and this is falsé.'''
		lowercase = tokenizer.tokenize(A__)
		lowercase = rust_tokenizer.tokenize(A__)
		self.assertListEqual(A__ ,A__)
		lowercase = tokenizer.encode(A__ ,add_special_tokens=A__)
		lowercase = rust_tokenizer.encode(A__ ,add_special_tokens=A__)
		self.assertListEqual(A__ ,A__)
		lowercase = self.get_rust_tokenizer()
		lowercase = tokenizer.encode(A__)
		lowercase = rust_tokenizer.encode(A__)
		self.assertListEqual(A__ ,A__)
	@slow
	def A__ ( self):
		# fmt: off
		lowercase = {'''input_ids''': [[5, 5_4, 7_1_9_6, 2_9_7, 3_0, 2_3, 7_7_6, 1_8, 1_1, 3_2_1_5, 3_7_0_5, 8_2_5_2, 2_2, 3_1_6_4, 1_1_8_1, 2_1_1_6, 2_9, 1_6, 8_1_3, 2_5, 7_9_1, 3_3_1_4, 2_0, 3_4_4_6, 3_8, 2_7_5_7_5, 1_2_0, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_6_8, 1_7, 1_1, 9_0_8_8, 2_0, 1_5_1_7, 8, 2_2_8_0_4, 1_8_8_1_8, 1_0, 3_8, 6_2_9, 6_0_7, 6_0_7, 1_4_2, 1_9, 7_1_9_6, 8_6_7, 5_6, 1_0_3_2_6, 2_4, 2_2_6_7, 2_0, 4_1_6, 5_0_7_2, 1_5_6_1_2, 2_3_3, 7_3_4, 7, 2_3_9_9, 2_7, 1_6, 3_0_1_5, 1_6_4_9, 7, 2_4, 2_0, 4_3_3_8, 2_3_9_9, 2_7, 1_3, 3_4_0_0, 1_4, 1_3, 6_1_8_9, 8, 9_3_0, 9, 6]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
		# fmt: on
		# camembert is a french model. So we also use french texts.
		lowercase = [
			'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
			'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
			'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
			'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
			'''telles que la traduction et la synthèse de texte.''',
		]
		self.tokenizer_integration_test_util(
			expected_encoding=A__ ,model_name='''camembert-base''' ,revision='''3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf''' ,sequences=A__ ,)
| 701 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Maps submodule name -> public names it exports; consumed by _LazyModule below.
# (The original mangled file bound this dict to a throwaway name while still
# referencing `_import_structure` at the bottom.)
_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}

# The modeling submodule is only exposed when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports ...
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )
else:
    import sys

    # ... while at runtime the module is replaced by a lazy proxy that imports
    # each submodule on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 633 | 0 |
def UpperCamelCase ( number ):
    """Return the ``number``-th term of Sylvester's sequence (a(1) = 2).

    a(n) = a(n-1)^2 - a(n-1) + 1, computed here as (a(n-1) - 1) * a(n-1) + 1.

    :raises ValueError: when number < 1
    :raises AssertionError: when number is not an int (original contract kept)
    """
    assert isinstance(number , int ), f'The input value of [n={number}] is not an integer'
    if number == 1:
        return 2
    elif number < 1:
        msg = f'The input value of [n={number}] has to be > 0'
        raise ValueError(msg )
    else:
        # Recurse through this function's own name (the original recursed via
        # the undefined name `sylvester`).
        num = UpperCamelCase(number - 1 )
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
print(F'The 8th number in Sylvester\'s sequence: {sylvester(8)}')
| 702 |
import logging
from transformers import PretrainedConfig
lowercase__ :int = logging.getLogger(__name__)
lowercase__ :Dict = {
"bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}
class lowercase ( SCREAMING_SNAKE_CASE__ ):
lowercase_ : Optional[int] ='''bertabs'''
def __init__( self ,A__=3_0_5_2_2 ,A__=5_1_2 ,A__=6 ,A__=5_1_2 ,A__=8 ,A__=5_1_2 ,A__=0.2 ,A__=6 ,A__=7_6_8 ,A__=8 ,A__=2_0_4_8 ,A__=0.2 ,**A__ ,):
super().__init__(**A__)
lowercase = vocab_size
lowercase = max_pos
lowercase = enc_layers
lowercase = enc_hidden_size
lowercase = enc_heads
lowercase = enc_ff_size
lowercase = enc_dropout
lowercase = dec_layers
lowercase = dec_hidden_size
lowercase = dec_heads
lowercase = dec_ff_size
lowercase = dec_dropout
| 633 | 0 |
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class lowercase ( TestCase ):
    """Static hygiene checks over every dataset script under ./datasets.

    NOTE(review): the original base class name and all four method names were
    machine-mangled (`A__`); the private-method names below are taken from the
    `self._no_encoding_on_file_open` / `self._no_print_statements` calls the
    original test bodies already made. `TestCase` is imported at the top of
    this file.
    """

    def _no_encoding_on_file_open( self ,file_path):
        """Return a match when `file_path` contains an open(...) call without an explicit mode/encoding."""
        with open(file_path ,encoding='''utf-8''') as input_file:
            regexp = re.compile(r'''(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)''')
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements( self ,file_path):
        """Return a match for a real print(...) call (comments and doc examples excluded), else None."""
        with open(file_path ,encoding='''utf-8''') as input_file:
            regexp = re.compile(r'''#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()''' ,re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open( self):
        dataset_paths = Path('''./datasets''')
        dataset_files = list(dataset_paths.absolute().glob('''**/*.py'''))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f'open(...) must use utf-8 encoding in {dataset}')

    def test_no_print_statements( self):
        dataset_paths = Path('''./datasets''')
        dataset_files = list(dataset_paths.absolute().glob('''**/*.py'''))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f'print statement found in {dataset}. Use datasets.logger/logging instead.')
| 703 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"The `inpainting.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionInpaintPipeline` instead."
)
| 633 | 0 |
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowercase ( __A , unittest.TestCase ):
	"""Tokenizer tests for ProphetNet: a word-piece vocab fixture, the basic and
	word-piece tokenizers, character-class helpers, and special-token building."""
	# NOTE(review): `__A` (presumably TokenizerTesterMixin) and the `A__` tokens
	# used as method names/arguments are machine-mangled, undefined names; confirm
	# against the original transformers ProphetNet tokenizer tests. Code unchanged.
	lowercase_ : Optional[Any] =ProphetNetTokenizer
	lowercase_ : List[str] =False
	def A__ ( self):
		super().setUp()
		# Minimal word-piece vocabulary fixture written to a temp file.
		lowercase = [
			'''[UNK]''',
			'''[CLS]''',
			'''[SEP]''',
			'''[PAD]''',
			'''[MASK]''',
			'''want''',
			'''##want''',
			'''##ed''',
			'''wa''',
			'''un''',
			'''runn''',
			'''##ing''',
			''',''',
			'''low''',
			'''lowest''',
		]
		lowercase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''])
		with open(self.vocab_file ,'''w''' ,encoding='''utf-8''') as vocab_writer:
			vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))
	def A__ ( self ,A__):
		lowercase = '''UNwant\u00E9d,running'''
		lowercase = '''unwanted, running'''
		return input_text, output_text
	def A__ ( self):
		# Full tokenizer: tokenization and id conversion against the fixture vocab.
		lowercase = self.tokenizer_class(self.vocab_file)
		lowercase = tokenizer.tokenize('''UNwant\u00E9d,running''')
		self.assertListEqual(A__ ,['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''])
		self.assertListEqual(tokenizer.convert_tokens_to_ids(A__) ,[9, 6, 7, 1_2, 1_0, 1_1])
	def A__ ( self):
		# BasicTokenizer: CJK characters split into single tokens.
		lowercase = BasicTokenizer()
		self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''') ,['''ah''', '''\u535A''', '''\u63A8''', '''zz'''])
	def A__ ( self):
		# Lower-casing behavior (accents kept by default).
		lowercase = BasicTokenizer(do_lower_case=A__)
		self.assertListEqual(
			tokenizer.tokenize(''' \tHeLLo!how  \n Are yoU?  ''') ,['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''])
		self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''') ,['''hello'''])
	def A__ ( self):
		lowercase = BasicTokenizer(do_lower_case=A__ ,strip_accents=A__)
		self.assertListEqual(
			tokenizer.tokenize(''' \tHäLLo!how  \n Are yoU?  ''') ,['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''])
		self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''') ,['''h\u00E9llo'''])
	def A__ ( self):
		lowercase = BasicTokenizer(do_lower_case=A__ ,strip_accents=A__)
		self.assertListEqual(
			tokenizer.tokenize(''' \tHäLLo!how  \n Are yoU?  ''') ,['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''])
		self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''') ,['''hello'''])
	def A__ ( self):
		lowercase = BasicTokenizer(do_lower_case=A__)
		self.assertListEqual(
			tokenizer.tokenize(''' \tHäLLo!how  \n Are yoU?  ''') ,['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''])
		self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''') ,['''hello'''])
	def A__ ( self):
		# Case-preserving behavior.
		lowercase = BasicTokenizer(do_lower_case=A__)
		self.assertListEqual(
			tokenizer.tokenize(''' \tHeLLo!how  \n Are yoU?  ''') ,['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''])
	def A__ ( self):
		lowercase = BasicTokenizer(do_lower_case=A__ ,strip_accents=A__)
		self.assertListEqual(
			tokenizer.tokenize(''' \tHäLLo!how  \n Are yoU?  ''') ,['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''])
	def A__ ( self):
		lowercase = BasicTokenizer(do_lower_case=A__ ,strip_accents=A__)
		self.assertListEqual(
			tokenizer.tokenize(''' \tHäLLo!how  \n Are yoU?  ''') ,['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''])
	def A__ ( self):
		# never_split keeps listed tokens intact.
		lowercase = BasicTokenizer(do_lower_case=A__ ,never_split=['''[UNK]'''])
		self.assertListEqual(
			tokenizer.tokenize(''' \tHeLLo!how  \n Are yoU? [UNK]''') ,['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''])
	def A__ ( self):
		# WordpieceTokenizer: greedy longest-match-first with [UNK] fallback.
		lowercase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
		lowercase = {}
		for i, token in enumerate(A__):
			lowercase = i
		lowercase = WordpieceTokenizer(vocab=A__ ,unk_token='''[UNK]''')
		self.assertListEqual(tokenizer.tokenize('''''') ,[])
		self.assertListEqual(tokenizer.tokenize('''unwanted running''') ,['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''])
		self.assertListEqual(tokenizer.tokenize('''unwantedX running''') ,['''[UNK]''', '''runn''', '''##ing'''])
	@require_torch
	def A__ ( self):
		# Batch encoding against the hosted checkpoint: ids, shapes, masks.
		lowercase = self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''')
		lowercase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
		lowercase = [1_0_3_7, 2_1_4_6, 2_0_4_2_3, 2_0_0_5, 7_6_8_0, 7_8_4_9, 3_9_8_9, 1_0_1_2, 1_0_2]
		lowercase = tokenizer(A__ ,padding=A__ ,return_tensors='''pt''')
		self.assertIsInstance(A__ ,A__)
		lowercase = list(batch.input_ids.numpy()[0])
		self.assertListEqual(A__ ,A__)
		self.assertEqual((2, 9) ,batch.input_ids.shape)
		self.assertEqual((2, 9) ,batch.attention_mask.shape)
	def A__ ( self):
		# Character-class helper: whitespace detection.
		self.assertTrue(_is_whitespace(''' '''))
		self.assertTrue(_is_whitespace('''\t'''))
		self.assertTrue(_is_whitespace('''\r'''))
		self.assertTrue(_is_whitespace('''\n'''))
		self.assertTrue(_is_whitespace('''\u00A0'''))
		self.assertFalse(_is_whitespace('''A'''))
		self.assertFalse(_is_whitespace('''-'''))
	def A__ ( self):
		# Control-character detection.
		self.assertTrue(_is_control('''\u0005'''))
		self.assertFalse(_is_control('''A'''))
		self.assertFalse(_is_control(''' '''))
		self.assertFalse(_is_control('''\t'''))
		self.assertFalse(_is_control('''\r'''))
	def A__ ( self):
		# Punctuation detection.
		self.assertTrue(_is_punctuation('''-'''))
		self.assertTrue(_is_punctuation('''$'''))
		self.assertTrue(_is_punctuation('''`'''))
		self.assertTrue(_is_punctuation('''.'''))
		self.assertFalse(_is_punctuation('''A'''))
		self.assertFalse(_is_punctuation(''' '''))
	@slow
	def A__ ( self):
		# build_inputs_with_special_tokens appends [SEP] (id 102) per sequence.
		lowercase = self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''')
		lowercase = tokenizer.encode('''sequence builders''' ,add_special_tokens=A__)
		lowercase = tokenizer.encode('''multi-sequence build''' ,add_special_tokens=A__)
		lowercase = tokenizer.build_inputs_with_special_tokens(A__)
		lowercase = tokenizer.build_inputs_with_special_tokens(A__ ,A__)
		assert encoded_sentence == text + [1_0_2]
		assert encoded_pair == text + [1_0_2] + text_a + [1_0_2]
| 704 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
lowercase__ :Optional[Any] = "0.12" # assumed parallelism: 8
if is_torch_available():
import torch
def UpperCamelCase ( shape , vocab_size , rng=None ):
    """Create a random int32 array of token ids with the given shape.

    :param shape: output shape, e.g. (batch, seq_len)
    :param vocab_size: ids are drawn uniformly from [0, vocab_size - 1]
    :param rng: optional ``random.Random`` for reproducibility
    """
    if rng is None:
        rng = random.Random()
    total_dims = 1
    for dim in shape:
        total_dims *= dim
    values = []
    for _ in range(total_dims ):
        values.append(rng.randint(0 , vocab_size - 1 ) )
    # `jnp.intaa` in the mangled original is the numeric-suffix mangling of int32.
    output = np.array(values , dtype=jnp.int32 ).reshape(shape )
    return output
def UpperCamelCase ( shape , rng=None ):
    """Create a random 0/1 attention mask of the given shape.

    NOTE(review): the mangled original dropped the assignment target of the
    `= 1` line; per its own comment it must force the last position of every
    row to 1 so each batch attends to at least one token — confirm against the
    original transformers flax generation test utilities.
    """
    attn_mask = ids_tensor(shape , vocab_size=2 , rng=rng )
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class lowercase :
lowercase_ : Any =None
lowercase_ : List[str] =()
def A__ ( self):
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
lowercase = 2
lowercase = inputs['''input_ids'''].shape[-1] // 2
lowercase = inputs['''input_ids'''][:max_batch_size, :sequence_length]
lowercase = jnp.ones_like(A__)
lowercase = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
lowercase = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
lowercase = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def A__ ( self):
lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
lowercase = False
lowercase = max_length
lowercase = 0
for model_class in self.all_generative_model_classes:
lowercase = model_class(A__)
lowercase = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowercase = getattr(A__ ,A__)
lowercase = pt_model_class(A__).eval()
lowercase = load_flax_weights_in_pytorch_model(A__ ,flax_model.params)
lowercase = flax_model.generate(A__).sequences
lowercase = pt_model.generate(torch.tensor(A__ ,dtype=torch.long))
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
lowercase = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() ,flax_generation_outputs.tolist())
def A__ ( self):
lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
lowercase = False
lowercase = max_length
for model_class in self.all_generative_model_classes:
lowercase = model_class(A__)
lowercase = model.generate(A__).sequences
self.assertEqual(generation_outputs.shape[-1] ,A__)
lowercase = jit(model.generate)
lowercase = jit_generate(A__).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
# Sampled generation (do_sample=True): with jax's deterministic PRNG the
# eager and jit-compiled outputs must still agree exactly.
# NOTE(review): identifier-collapsed — see note on the greedy test above.
def A__ ( self):
lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
lowercase = True
lowercase = max_length
for model_class in self.all_generative_model_classes:
lowercase = model_class(A__)
lowercase = model.generate(A__).sequences
self.assertEqual(generation_outputs.shape[-1] ,A__)
lowercase = jit(model.generate)
lowercase = jit_generate(A__).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
# Beam-search generation (do_sample=False, num_beams=2): eager vs jit must
# match and the sequence length must equal the configured max_length.
# NOTE(review): identifier-collapsed — see note on the greedy test above.
def A__ ( self):
lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
lowercase = False
lowercase = max_length
# presumably config.num_beams = 2
lowercase = 2
for model_class in self.all_generative_model_classes:
lowercase = model_class(A__)
lowercase = model.generate(A__).sequences
self.assertEqual(generation_outputs.shape[-1] ,A__)
lowercase = jit(model.generate)
lowercase = jit_generate(A__).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
# Beam search with num_return_sequences=2: output batch dimension must be
# input batch * num_return_sequences.
# NOTE(review): identifier-collapsed; the two bare `= 2` assignments were
# presumably config.num_beams and config.num_return_sequences — confirm.
def A__ ( self):
lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
lowercase = False
lowercase = max_length
lowercase = 2
lowercase = 2
for model_class in self.all_generative_model_classes:
lowercase = model_class(A__)
lowercase = model.generate(A__).sequences
self.assertEqual(generation_outputs.shape[0] ,input_ids.shape[0] * config.num_return_sequences)
# Sampling with the full stack of logits warpers/processors enabled
# (temperature 0.8, top_k 10, top_p 0.3, min_length 1, forced bos/eos 8/9);
# eager and jit outputs must agree.
# NOTE(review): identifier-collapsed; each bare assignment below originally
# targeted a distinct config field — confirm the mapping upstream.
def A__ ( self):
lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
lowercase = True
lowercase = max_length
lowercase = 0.8
lowercase = 1_0
lowercase = 0.3
lowercase = 1
lowercase = 8
lowercase = 9
for model_class in self.all_generative_model_classes:
lowercase = model_class(A__)
lowercase = model.generate(A__).sequences
self.assertEqual(generation_outputs.shape[-1] ,A__)
lowercase = jit(model.generate)
lowercase = jit_generate(A__).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
# Greedy generation with min_length / forced bos (8) / forced eos (9)
# processors; eager and jit outputs must agree.
# NOTE(review): identifier-collapsed — see note on the sampling test above.
def A__ ( self):
lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
lowercase = max_length
lowercase = 1
lowercase = 8
lowercase = 9
for model_class in self.all_generative_model_classes:
lowercase = model_class(A__)
lowercase = model.generate(A__).sequences
self.assertEqual(generation_outputs.shape[-1] ,A__)
lowercase = jit(model.generate)
lowercase = jit_generate(A__).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
# Beam search (num_beams=2) combined with min_length / forced bos (8) /
# forced eos (9) processors; eager and jit outputs must agree.
# NOTE(review): identifier-collapsed — see note on the sampling test above.
def A__ ( self):
lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
lowercase = max_length
lowercase = 2
lowercase = 1
lowercase = 8
lowercase = 9
for model_class in self.all_generative_model_classes:
lowercase = model_class(A__)
lowercase = model.generate(A__).sequences
self.assertEqual(generation_outputs.shape[-1] ,A__)
lowercase = jit(model.generate)
lowercase = jit_generate(A__).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
# Greedy generation with a partially-masked prompt: zero out the first
# attention position and verify eager vs jit agreement when the mask is
# passed explicitly.
# NOTE(review): identifier-collapsed — see earlier notes.
def A__ ( self):
lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
# pad attention mask on the left
lowercase = attention_mask.at[(0, 0)].set(0)
lowercase = False
lowercase = max_length
for model_class in self.all_generative_model_classes:
lowercase = model_class(A__)
lowercase = model.generate(A__ ,attention_mask=A__).sequences
self.assertEqual(generation_outputs.shape[-1] ,A__)
lowercase = jit(model.generate)
lowercase = jit_generate(A__ ,attention_mask=A__).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
# Sampled generation with a partially-masked prompt (first position zeroed);
# eager vs jit must agree when the mask is passed explicitly.
# NOTE(review): identifier-collapsed — see earlier notes.
def A__ ( self):
lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
# pad attention mask on the left
lowercase = attention_mask.at[(0, 0)].set(0)
lowercase = True
lowercase = max_length
for model_class in self.all_generative_model_classes:
lowercase = model_class(A__)
lowercase = model.generate(A__ ,attention_mask=A__).sequences
self.assertEqual(generation_outputs.shape[-1] ,A__)
lowercase = jit(model.generate)
lowercase = jit_generate(A__ ,attention_mask=A__).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
# Beam-search generation (num_beams=2) with a partially-masked prompt;
# eager vs jit must agree when the mask is passed explicitly.
# NOTE(review): identifier-collapsed — see earlier notes.
def A__ ( self):
lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
# pad attention mask on the left
lowercase = attention_mask.at[(0, 0)].set(0)
lowercase = 2
lowercase = max_length
for model_class in self.all_generative_model_classes:
lowercase = model_class(A__)
lowercase = model.generate(A__ ,attention_mask=A__).sequences
self.assertEqual(generation_outputs.shape[-1] ,A__)
lowercase = jit(model.generate)
lowercase = jit_generate(A__ ,attention_mask=A__).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
@require_flax
# Integration test: `generate()` must reject unknown keyword arguments with a
# helpful error naming the bad kwarg (typo detection for e.g. `do_samples`).
# NOTE(review): `A__` is reused for every value here (tokenizer, model, text,
# input_ids, expected exception type); the assertRaisesRegex calls presumably
# expected ValueError/TypeError — confirm against the original.
class lowercase ( unittest.TestCase ):
def A__ ( self):
lowercase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-bert''')
lowercase = FlaxAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''')
lowercase = '''Hello world'''
lowercase = tokenizer(A__ ,return_tensors='''np''').input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(A__ ,'''do_samples'''):
model.generate(A__ ,do_samples=A__)
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(A__ ,'''foo'''):
lowercase = {'''foo''': '''bar'''}
model.generate(A__ ,**A__)
| 633 | 0 |
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
lowercase__ :Tuple = logging.getLogger(__name__)
# PyTorch-Lightning module for seq2seq summarization finetuning (this is the
# classic transformers `finetune.py` SummarizationModule, machine-mangled).
# NOTE(review): bodies reference `_A`, which is never defined — the source
# transformation replaced the real argument names; every `_A` must be mapped
# back to the corresponding parameter before this can run. Several methods
# also declare duplicate parameter names (`A__ ,A__`), which is a SyntaxError.
class lowercase ( __lowercase ):
# task mode, tracked loss names, validation metric names, default val metric
lowercase_ : int ='''summarization'''
lowercase_ : Tuple =['''loss''']
lowercase_ : List[Any] =ROUGE_KEYS
lowercase_ : Optional[int] ='''rouge2'''
# Sets up tokenizer/model/dataset plumbing from argparse `hparams`:
# validates sampler flags, saves git info + hparams, freezes encoder or
# embeddings on request, and resolves per-split sizes and target lengths.
def __init__( self ,A__ ,**A__):
if hparams.sortish_sampler and hparams.gpus > 1:
lowercase = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError('''Dynamic Batch size does not work for multi-gpu training''')
if hparams.sortish_sampler:
raise ValueError('''--sortish_sampler and --max_tokens_per_batch may not be used simultaneously''')
super().__init__(_A ,num_labels=_A ,mode=self.mode ,**_A)
use_task_specific_params(self.model ,'''summarization''')
save_git_info(self.hparams.output_dir)
lowercase = Path(self.output_dir) / '''metrics.json'''
lowercase = Path(self.output_dir) / '''hparams.pkl'''
pickle_save(self.hparams ,self.hparams_save_path)
lowercase = 0
lowercase = defaultdict(_A)
lowercase = self.config.model_type
# fsmt keeps a separate target vocab; everything else shares one vocab
lowercase = self.config.tgt_vocab_size if self.model_type == '''fsmt''' else self.config.vocab_size
lowercase = {
'''data_dir''': self.hparams.data_dir,
'''max_source_length''': self.hparams.max_source_length,
'''prefix''': self.model.config.prefix or '''''',
}
lowercase = {
'''train''': self.hparams.n_train,
'''val''': self.hparams.n_val,
'''test''': self.hparams.n_test,
}
# -1 means "use the whole split"
lowercase = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
lowercase = {
'''train''': self.hparams.max_target_length,
'''val''': self.hparams.val_max_target_length,
'''test''': self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], f'target_lens: {self.target_lens}'
assert self.target_lens["train"] <= self.target_lens["test"], f'target_lens: {self.target_lens}'
if self.hparams.freeze_embeds:
freeze_embeds(self.model)
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder())
assert_all_frozen(self.model.get_encoder())
lowercase = get_git_info()['''repo_sha''']
lowercase = hparams.num_workers
lowercase = None # default to config
# MBart needs an explicit decoder_start_token_id derived from tgt_lang
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer ,_A):
lowercase = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
lowercase = self.decoder_start_token_id
lowercase = (
SeqaSeqDataset if hasattr(self.tokenizer ,'''prepare_seq2seq_batch''') else LegacySeqaSeqDataset
)
lowercase = False
lowercase = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
lowercase = self.hparams.eval_max_gen_length
else:
lowercase = self.model.config.max_length
lowercase = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
# Dump one decoded batch (text + token ids) to the output dir for debugging.
def A__ ( self ,A__):
lowercase = {
k: self.tokenizer.batch_decode(v.tolist()) if '''mask''' not in k else v.shape for k, v in batch.items()
}
save_json(_A ,Path(self.output_dir) / '''text_batch.json''')
save_json({k: v.tolist() for k, v in batch.items()} ,Path(self.output_dir) / '''tok_batch.json''')
lowercase = True
return readable_batch
# Forward pass: delegates straight to the wrapped transformers model.
def A__ ( self ,A__ ,**A__):
return self.model(_A ,**_A)
# Decode generated ids to whitespace-stripped strings.
def A__ ( self ,A__):
lowercase = self.tokenizer.batch_decode(
_A ,skip_special_tokens=_A ,clean_up_tokenization_spaces=_A)
return lmap(str.strip ,_A)
# One training/eval step: shift labels right for the decoder, run the model,
# and compute either plain CE (ignoring pad) or label-smoothed NLL loss.
def A__ ( self ,A__):
lowercase = self.tokenizer.pad_token_id
lowercase , lowercase = batch['''input_ids'''], batch['''attention_mask''']
lowercase = batch['''labels''']
if isinstance(self.model ,_A):
lowercase = self.model._shift_right(_A)
else:
lowercase = shift_tokens_right(_A ,_A)
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
lowercase = decoder_input_ids
self.save_readable_batch(_A)
lowercase = self(_A ,attention_mask=_A ,decoder_input_ids=_A ,use_cache=_A)
lowercase = outputs['''logits''']
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
lowercase = nn.CrossEntropyLoss(ignore_index=_A)
assert lm_logits.shape[-1] == self.vocab_size
lowercase = ce_loss_fct(lm_logits.view(-1 ,lm_logits.shape[-1]) ,tgt_ids.view(-1))
else:
lowercase = nn.functional.log_softmax(_A ,dim=-1)
lowercase , lowercase = label_smoothed_nll_loss(
_A ,_A ,self.hparams.label_smoothing ,ignore_index=_A)
return (loss,)
@property
def A__ ( self):
# pad token id shortcut used by the logging code below
return self.tokenizer.pad_token_id
# Lightning training_step: compute loss and log tokens-per-batch / padding stats.
def A__ ( self ,A__ ,A__):
lowercase = self._step(_A)
lowercase = dict(zip(self.loss_names ,_A))
# tokens per batch
lowercase = batch['''input_ids'''].ne(self.pad).sum() + batch['''labels'''].ne(self.pad).sum()
lowercase = batch['''input_ids'''].shape[0]
lowercase = batch['''input_ids'''].eq(self.pad).sum()
lowercase = batch['''input_ids'''].eq(self.pad).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
# Lightning validation_step: generate + score one batch.
def A__ ( self ,A__ ,A__):
return self._generative_step(_A)
# Aggregate per-batch outputs at epoch end: mean losses, generative metrics,
# and the tracked val metric; appends a row to self.metrics for the callback.
def A__ ( self ,A__ ,A__="val"):
self.step_count += 1
lowercase = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
lowercase = losses['''loss''']
lowercase = {
k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ['''gen_time''', '''gen_len''']
}
lowercase = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
lowercase = torch.tensor(_A).type_as(_A)
generative_metrics.update({k: v.item() for k, v in losses.items()})
losses.update(_A)
lowercase = {f'{prefix}_avg_{k}': x for k, x in losses.items()}
lowercase = self.step_count
self.metrics[prefix].append(_A) # callback writes this to self.metrics_save_path
lowercase = flatten_list([x['''preds'''] for x in outputs])
return {
"log": all_metrics,
"preds": preds,
f'{prefix}_loss': loss,
f'{prefix}_{self.val_metric}': metric_tensor,
}
# Metric hook: ROUGE for summarization (overridden with BLEU for translation).
def A__ ( self ,A__ ,A__):
return calculate_rouge(_A ,_A)
# Generate predictions for one batch and return loss + gen_time/gen_len/preds.
def A__ ( self ,A__):
lowercase = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
lowercase = self.model.generate(
batch['''input_ids'''] ,attention_mask=batch['''attention_mask'''] ,use_cache=_A ,decoder_start_token_id=self.decoder_start_token_id ,num_beams=self.eval_beams ,max_length=self.eval_max_length ,)
lowercase = (time.time() - ta) / batch['''input_ids'''].shape[0]
lowercase = self.ids_to_clean_text(_A)
lowercase = self.ids_to_clean_text(batch['''labels'''])
lowercase = self._step(_A)
lowercase = dict(zip(self.loss_names ,_A))
lowercase = self.calc_generative_metrics(_A ,_A)
lowercase = np.mean(lmap(_A ,_A))
base_metrics.update(gen_time=_A ,gen_len=_A ,preds=_A ,target=_A ,**_A)
return base_metrics
# Lightning test_step: same as validation.
def A__ ( self ,A__ ,A__):
return self._generative_step(_A)
# Lightning test_epoch_end: reuse validation aggregation with the "test" prefix.
def A__ ( self ,A__):
return self.validation_epoch_end(_A ,prefix='''test''')
# Build the dataset for one split using the resolved n_obs / target lengths.
def A__ ( self ,A__):
lowercase = self.n_obs[type_path]
lowercase = self.target_lens[type_path]
lowercase = self.dataset_class(
self.tokenizer ,type_path=_A ,n_obs=_A ,max_target_length=_A ,**self.dataset_kwargs ,)
return dataset
# Build a DataLoader; train split may use a sortish sampler or a dynamic
# (max-tokens) batch sampler, eval splits always use plain batching.
def A__ ( self ,A__ ,A__ ,A__ = False):
lowercase = self.get_dataset(_A)
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
lowercase = dataset.make_sortish_sampler(_A ,distributed=self.hparams.gpus > 1)
return DataLoader(
_A ,batch_size=_A ,collate_fn=dataset.collate_fn ,shuffle=_A ,num_workers=self.num_workers ,sampler=_A ,)
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
lowercase = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch ,distributed=self.hparams.gpus > 1)
return DataLoader(
_A ,batch_sampler=_A ,collate_fn=dataset.collate_fn ,num_workers=self.num_workers ,)
else:
return DataLoader(
_A ,batch_size=_A ,collate_fn=dataset.collate_fn ,shuffle=_A ,num_workers=self.num_workers ,sampler=_A ,)
def A__ ( self):
# train_dataloader
lowercase = self.get_dataloader('''train''' ,batch_size=self.hparams.train_batch_size ,shuffle=_A)
return dataloader
def A__ ( self):
# val_dataloader
return self.get_dataloader('''val''' ,batch_size=self.hparams.eval_batch_size)
def A__ ( self):
# test_dataloader
return self.get_dataloader('''test''' ,batch_size=self.hparams.eval_batch_size)
@staticmethod
# Register all model-specific CLI arguments on top of the generic ones.
def A__ ( A__ ,A__):
BaseTransformer.add_model_specific_args(_A ,_A)
add_generic_args(_A ,_A)
parser.add_argument(
'''--max_source_length''' ,default=1_0_2_4 ,type=_A ,help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) ,)
parser.add_argument(
'''--max_target_length''' ,default=5_6 ,type=_A ,help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) ,)
parser.add_argument(
'''--val_max_target_length''' ,default=1_4_2 ,type=_A ,help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) ,)
parser.add_argument(
'''--test_max_target_length''' ,default=1_4_2 ,type=_A ,help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) ,)
parser.add_argument('''--freeze_encoder''' ,action='''store_true''')
parser.add_argument('''--freeze_embeds''' ,action='''store_true''')
parser.add_argument('''--sortish_sampler''' ,action='''store_true''' ,default=_A)
parser.add_argument('''--overwrite_output_dir''' ,action='''store_true''' ,default=_A)
parser.add_argument('''--max_tokens_per_batch''' ,type=_A ,default=_A)
parser.add_argument('''--logger_name''' ,type=_A ,choices=['''default''', '''wandb''', '''wandb_shared'''] ,default='''default''')
parser.add_argument('''--n_train''' ,type=_A ,default=-1 ,required=_A ,help='''# examples. -1 means use all.''')
parser.add_argument('''--n_val''' ,type=_A ,default=5_0_0 ,required=_A ,help='''# examples. -1 means use all.''')
parser.add_argument('''--n_test''' ,type=_A ,default=-1 ,required=_A ,help='''# examples. -1 means use all.''')
parser.add_argument(
'''--task''' ,type=_A ,default='''summarization''' ,required=_A ,help='''# examples. -1 means use all.''')
parser.add_argument('''--label_smoothing''' ,type=_A ,default=0.0 ,required=_A)
parser.add_argument('''--src_lang''' ,type=_A ,default='''''' ,required=_A)
parser.add_argument('''--tgt_lang''' ,type=_A ,default='''''' ,required=_A)
parser.add_argument('''--eval_beams''' ,type=_A ,default=_A ,required=_A)
parser.add_argument(
'''--val_metric''' ,type=_A ,default=_A ,required=_A ,choices=['''bleu''', '''rouge2''', '''loss''', None])
parser.add_argument('''--eval_max_gen_length''' ,type=_A ,default=_A ,help='''never generate more than n tokens''')
parser.add_argument('''--save_top_k''' ,type=_A ,default=1 ,required=_A ,help='''How many checkpoints to save''')
parser.add_argument(
'''--early_stopping_patience''' ,type=_A ,default=-1 ,required=_A ,help=(
'''-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'''
''' val_check_interval will effect it.'''
) ,)
return parser
# Translation variant of the summarization module above: swaps the task name,
# tracks BLEU instead of ROUGE, and records src/tgt languages from hparams.
# NOTE(review): same mangling as above — `_A` is undefined and must be mapped
# back to the real parameter names before running.
class lowercase ( __lowercase ):
lowercase_ : Dict ='''translation'''
lowercase_ : Union[str, Any] =['''loss''']
lowercase_ : List[str] =['''bleu''']
lowercase_ : Tuple ='''bleu'''
def __init__( self ,A__ ,**A__):
super().__init__(_A ,**_A)
lowercase = hparams.src_lang
lowercase = hparams.tgt_lang
# Metric hook override: corpus BLEU between predictions and targets.
def A__ ( self ,A__ ,A__):
return calculate_bleu(_A ,_A)
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__=None ):
'''Train (and optionally test) a summarization/translation module.

Creates the output dir, builds the module for ``args.task`` unless one is
passed in, wires up the chosen logger (wandb / wandb_shared / default) and
early stopping, runs ``generic_train``, then optionally reloads the newest
checkpoint and runs ``trainer.test()``.

NOTE(review): machine-mangled — ``args``/``model`` inside the body should be
the two parameters, and ``__snake_case`` is undefined; restore the original
names before running.
'''
Path(args.output_dir ).mkdir(exist_ok=__snake_case )
check_output_dir(__snake_case , expected_items=3 )
if model is None:
if "summarization" in args.task:
lowercase = SummarizationModule(__snake_case )
else:
lowercase = TranslationModule(__snake_case )
lowercase = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith('''/tmp''' )
or str(args.output_dir ).startswith('''/var''' )
):
lowercase = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
lowercase = os.environ.get('''WANDB_PROJECT''' , __snake_case )
lowercase = WandbLogger(name=model.output_dir.name , project=__snake_case )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
lowercase = WandbLogger(name=model.output_dir.name , project=f'hf_{dataset}' )
if args.early_stopping_patience >= 0:
lowercase = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
else:
lowercase = False
lowercase = args.val_metric == '''loss'''
lowercase = generic_train(
__snake_case , __snake_case , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
args.output_dir , model.val_metric , args.save_top_k , __snake_case ) , early_stopping_callback=__snake_case , logger=__snake_case , )
pickle_save(model.hparams , model.output_dir / '''hparams.pkl''' )
if not args.do_predict:
return model
lowercase = ''''''
# newest checkpoint sorts last lexicographically
lowercase = sorted(glob.glob(os.path.join(args.output_dir , '''*.ckpt''' ) , recursive=__snake_case ) )
if checkpoints:
# NOTE(review): duplicated assignment — in the original these were two
# distinct names (e.g. best checkpoint path and trainer resume path).
lowercase = checkpoints[-1]
lowercase = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
# Script entry point: generic Trainer args + model-specific args, then train.
# NOTE(review): all four targets were mangled to `lowercase__`; `parser` and
# `args` read below no longer resolve — restore the distinct names.
lowercase__ :str = argparse.ArgumentParser()
lowercase__ :Any = pl.Trainer.add_argparse_args(parser)
lowercase__ :Optional[int] = SummarizationModule.add_model_specific_args(parser, os.getcwd())
lowercase__ :List[Any] = parser.parse_args()
main(args)
| 705 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
# Test suite for diffusers' UniPCMultistepScheduler: config round-tripping,
# step equivalence after save/load, cross-scheduler config compatibility,
# fp16 support, and full-loop output means against reference values.
# NOTE(review): machine-mangled — locals are all `lowercase`, several methods
# declare duplicate `A__` parameters (a SyntaxError), and boolean/None flags
# were collapsed into bare `A__` arguments; confirm against the upstream test.
class lowercase ( SCREAMING_SNAKE_CASE__ ):
lowercase_ : List[str] =(UniPCMultistepScheduler,)
lowercase_ : Tuple =(('''num_inference_steps''', 25),)
# Default scheduler config, overridable via kwargs.
def A__ ( self ,**A__):
lowercase = {
'''num_train_timesteps''': 1_0_0_0,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''solver_type''': '''bh2''',
}
config.update(**A__)
return config
# Saving and reloading a configured scheduler must not change step() output,
# stepping several timesteps with identical dummy residual history.
def A__ ( self ,A__=0 ,**A__):
lowercase = dict(self.forward_default_kwargs)
lowercase = kwargs.pop('''num_inference_steps''' ,A__)
lowercase = self.dummy_sample
lowercase = 0.1 * sample
lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowercase = self.get_scheduler_config(**A__)
lowercase = scheduler_class(**A__)
scheduler.set_timesteps(A__)
# copy over dummy past residuals
lowercase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(A__)
lowercase = scheduler_class.from_pretrained(A__)
new_scheduler.set_timesteps(A__)
# copy over dummy past residuals
lowercase = dummy_past_residuals[: new_scheduler.config.solver_order]
lowercase , lowercase = sample, sample
for t in range(A__ ,time_step + scheduler.config.solver_order + 1):
lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
lowercase = new_scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
# Same round-trip check but with the default config and a single step.
def A__ ( self ,A__=0 ,**A__):
lowercase = dict(self.forward_default_kwargs)
lowercase = kwargs.pop('''num_inference_steps''' ,A__)
lowercase = self.dummy_sample
lowercase = 0.1 * sample
lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowercase = self.get_scheduler_config()
lowercase = scheduler_class(**A__)
scheduler.set_timesteps(A__)
# copy over dummy past residuals (must be after setting timesteps)
lowercase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(A__)
lowercase = scheduler_class.from_pretrained(A__)
# copy over dummy past residuals
new_scheduler.set_timesteps(A__)
# copy over dummy past residual (must be after setting timesteps)
lowercase = dummy_past_residuals[: new_scheduler.config.solver_order]
lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
lowercase = new_scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
# Run a full 10-step denoising loop with the dummy model; returns the final
# sample tensor for downstream mean checks.
def A__ ( self ,A__=None ,**A__):
if scheduler is None:
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config(**A__)
lowercase = scheduler_class(**A__)
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config(**A__)
lowercase = scheduler_class(**A__)
lowercase = 1_0
lowercase = self.dummy_model()
lowercase = self.dummy_sample_deter
scheduler.set_timesteps(A__)
for i, t in enumerate(scheduler.timesteps):
lowercase = model(A__ ,A__)
lowercase = scheduler.step(A__ ,A__ ,A__).prev_sample
return sample
# Two consecutive timesteps must produce outputs with the sample's shape.
def A__ ( self):
lowercase = dict(self.forward_default_kwargs)
lowercase = kwargs.pop('''num_inference_steps''' ,A__)
for scheduler_class in self.scheduler_classes:
lowercase = self.get_scheduler_config()
lowercase = scheduler_class(**A__)
lowercase = self.dummy_sample
lowercase = 0.1 * sample
if num_inference_steps is not None and hasattr(A__ ,'''set_timesteps'''):
scheduler.set_timesteps(A__)
elif num_inference_steps is not None and not hasattr(A__ ,'''set_timesteps'''):
lowercase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
lowercase = dummy_past_residuals[: scheduler.config.solver_order]
lowercase = scheduler.timesteps[5]
lowercase = scheduler.timesteps[6]
lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
self.assertEqual(output_a.shape ,sample.shape)
self.assertEqual(output_a.shape ,output_a.shape)
def A__ ( self):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
lowercase = UniPCMultistepScheduler(**self.get_scheduler_config())
lowercase = self.full_loop(scheduler=A__)
lowercase = torch.mean(torch.abs(A__))
assert abs(result_mean.item() - 0.2464) < 1E-3
# round-trip the config through sibling scheduler classes and back
lowercase = DPMSolverSinglestepScheduler.from_config(scheduler.config)
lowercase = DEISMultistepScheduler.from_config(scheduler.config)
lowercase = DPMSolverMultistepScheduler.from_config(scheduler.config)
lowercase = UniPCMultistepScheduler.from_config(scheduler.config)
lowercase = self.full_loop(scheduler=A__)
lowercase = torch.mean(torch.abs(A__))
assert abs(result_mean.item() - 0.2464) < 1E-3
# Sweep num_train_timesteps values.
def A__ ( self):
for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=A__)
# Sweep thresholding / solver order / solver type / prediction type combos.
def A__ ( self):
self.check_over_configs(thresholding=A__)
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=A__ ,prediction_type=A__ ,sample_max_value=A__ ,solver_order=A__ ,solver_type=A__ ,)
# Sweep prediction types.
def A__ ( self):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=A__)
# Sweep solver types/orders and assert the full loop never produces NaNs.
def A__ ( self):
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=A__ ,solver_type=A__ ,prediction_type=A__ ,)
lowercase = self.full_loop(
solver_order=A__ ,solver_type=A__ ,prediction_type=A__ ,)
assert not torch.isnan(A__).any(), "Samples have nan numbers"
# lower_order_final toggle (presumably True then False in the original).
def A__ ( self):
self.check_over_configs(lower_order_final=A__)
self.check_over_configs(lower_order_final=A__)
# Sweep num_inference_steps values.
def A__ ( self):
for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_forward(num_inference_steps=A__ ,time_step=0)
# Reference mean for the default (epsilon) full loop.
def A__ ( self):
lowercase = self.full_loop()
lowercase = torch.mean(torch.abs(A__))
assert abs(result_mean.item() - 0.2464) < 1E-3
# Reference mean for v_prediction.
def A__ ( self):
lowercase = self.full_loop(prediction_type='''v_prediction''')
lowercase = torch.mean(torch.abs(A__))
assert abs(result_mean.item() - 0.1014) < 1E-3
# fp16 full loop: output dtype must stay half precision
# (`torch.floataa` is mangled — presumably torch.float16).
def A__ ( self):
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config(thresholding=A__ ,dynamic_thresholding_ratio=0)
lowercase = scheduler_class(**A__)
lowercase = 1_0
lowercase = self.dummy_model()
lowercase = self.dummy_sample_deter.half()
scheduler.set_timesteps(A__)
for i, t in enumerate(scheduler.timesteps):
lowercase = model(A__ ,A__)
lowercase = scheduler.step(A__ ,A__ ,A__).prev_sample
assert sample.dtype == torch.floataa
# Setting timesteps to the full training schedule must yield exactly
# num_inference_steps unique timesteps.
def A__ ( self ,**A__):
for scheduler_class in self.scheduler_classes:
lowercase = self.get_scheduler_config(**A__)
lowercase = scheduler_class(**A__)
scheduler.set_timesteps(scheduler.config.num_train_timesteps)
assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
| 633 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.