| code (string, length 82–54.1k) | code_codestyle (int64, 0–699) | style_context (string, length 111–35.6k) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
"""Open the top Google search result for a query in the default web browser."""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
    print("Googling.....")
    url = f"https://www.google.com/search?q={query}&num=100"
    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )
    try:
        # Organic results are wrapped in a div with class "yuRUbf".
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        # Fallback layout: links live in "kCrYT" divs as /url?q=... redirects.
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]
    webbrowser.open(link)
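# Hedged usage sketch (editor's addition): invoking the script above from a shell.
# The filename is an assumption; any saved copy of the script works the same way.
#
#     $ python google_search.py "how to format python code"
#     Googling.....
#     (opens the first result in the default browser)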
| 75 |
#!/usr/bin/env python

# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os

from accelerate.utils import ComputeEnvironment

from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file  # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment  # noqa: F401
from .sagemaker import get_sagemaker_input

description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine"


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
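# Hedged usage note (editor's addition): this module backs the `accelerate config`
# subcommand, so the interactive prompt above is normally reached via:
#
#     $ accelerate config
#     In which compute environment are you running?
#
# The resulting default_config.yaml is then picked up by `accelerate launch`.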
| 75 | 1 |
"""Depth-first search on a directed graph stored as an adjacency dict."""


class Graph:
    def __init__(self):
        self.vertex = {}

    def print_graph(self):
        """Print the adjacency list of every vertex."""
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int):
        """Add a directed edge from `from_vertex` to `to_vertex`."""
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self):
        """Visit every vertex, restarting the search from each unvisited one."""
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list):
        # mark the current vertex as visited and print it
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # recur for all the vertices that are adjacent to this node
        # (.get guards vertices that have no outgoing edges)
        for i in self.vertex.get(start_vertex, []):
            if not visited[i]:
                self.dfs_recursive(i, visited)


if __name__ == "__main__":
    g = Graph()
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 2)
    g.add_edge(2, 0)
    g.add_edge(2, 3)
    g.add_edge(3, 3)

    g.print_graph()
    print("DFS:")
    g.dfs()

    # OUTPUT:
    # 0 -> 1 -> 2
    # 1 -> 2
    # 2 -> 0 -> 3
    # 3 -> 3
    # DFS:
    # 0 1 2 3
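# Editor's sketch: an iterative depth-first search equivalent to Graph.dfs_recursive
# above, using an explicit stack instead of recursion. `Graph` is the class defined
# above; for the example graph the traversal order matches the recursive version.
def dfs_iterative(graph: "Graph", start_vertex: int) -> None:
    visited = [False] * len(graph.vertex)
    stack = [start_vertex]
    while stack:
        vertex = stack.pop()
        if not visited[vertex]:
            visited[vertex] = True
            print(vertex, end=" ")
            # push unvisited neighbours in reverse so the first neighbour is popped first
            stack.extend(reversed([v for v in graph.vertex.get(vertex, []) if not visited[v]]))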
| 75 |
"""Convert an OpenAI GPT-2 TensorFlow checkpoint to a PyTorch model."""

import argparse

import torch

from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()


def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--gpt2_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
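# Hedged usage sketch (editor's addition): typical invocation of the converter above.
# The script filename, checkpoint path and output path are placeholders, not values
# from the original file.
#
#     $ python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#         --gpt2_checkpoint_path /path/to/tf_checkpoint \
#         --pytorch_dump_folder_path /path/to/pytorch_model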
| 75 | 1 |
"""BLIP-2 model configuration."""

import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}


class Blip2VisionConfig(PretrainedConfig):
    model_type = "blip_2_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=0.00001,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Blip2QFormerConfig(PretrainedConfig):
    model_type = "blip_2_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Blip2Config(PretrainedConfig):
    model_type = "blip-2"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = Blip2VisionConfig(**vision_config)
        self.qformer_config = Blip2QFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: Blip2VisionConfig,
        qformer_config: Blip2QFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
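# Editor's hedged sketch: composing the three sub-configs defined above into a full
# Blip2Config. OPTConfig is named only for illustration; it is not imported in this
# module (the text config is normally resolved through CONFIG_MAPPING).
#
#     from transformers import OPTConfig
#     config = Blip2Config.from_vision_qformer_text_configs(
#         vision_config=Blip2VisionConfig(),
#         qformer_config=Blip2QFormerConfig(),
#         text_config=OPTConfig(),
#     )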
| 75 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMvaModelTester:
def __init__( self : Optional[int] , _A : Optional[Any] , _A : Tuple=2 , _A : Tuple=3 , _A : Optional[Any]=4 , _A : List[Any]=2 , _A : List[Any]=7 , _A : int=True , _A : Dict=True , _A : int=True , _A : Dict=True , _A : Tuple=99 , _A : Union[str, Any]=36 , _A : int=2 , _A : List[str]=4 , _A : int=37 , _A : List[Any]="gelu" , _A : str=0.1 , _A : str=0.1 , _A : Tuple=512 , _A : Dict=16 , _A : Tuple=2 , _A : Union[str, Any]=0.0_2 , _A : Any=6 , _A : Union[str, Any]=6 , _A : str=3 , _A : str=4 , _A : Tuple=None , _A : int=1_000 , ):
'''simple docstring'''
UpperCAmelCase__ : int = parent
UpperCAmelCase__ : Optional[int] = batch_size
UpperCAmelCase__ : str = num_channels
UpperCAmelCase__ : str = image_size
UpperCAmelCase__ : List[str] = patch_size
UpperCAmelCase__ : Any = is_training
UpperCAmelCase__ : List[str] = use_input_mask
UpperCAmelCase__ : Tuple = use_token_type_ids
UpperCAmelCase__ : str = use_labels
UpperCAmelCase__ : int = vocab_size
UpperCAmelCase__ : List[Any] = hidden_size
UpperCAmelCase__ : Optional[int] = num_hidden_layers
UpperCAmelCase__ : List[str] = num_attention_heads
UpperCAmelCase__ : Tuple = intermediate_size
UpperCAmelCase__ : Dict = hidden_act
UpperCAmelCase__ : int = hidden_dropout_prob
UpperCAmelCase__ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase__ : List[str] = max_position_embeddings
UpperCAmelCase__ : Tuple = type_vocab_size
UpperCAmelCase__ : Any = type_sequence_label_size
UpperCAmelCase__ : List[str] = initializer_range
UpperCAmelCase__ : List[str] = coordinate_size
UpperCAmelCase__ : Tuple = shape_size
UpperCAmelCase__ : Optional[int] = num_labels
UpperCAmelCase__ : Optional[Any] = num_choices
UpperCAmelCase__ : Union[str, Any] = scope
UpperCAmelCase__ : Optional[Any] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
UpperCAmelCase__ : str = text_seq_length
UpperCAmelCase__ : Tuple = (image_size // patch_size) ** 2 + 1
UpperCAmelCase__ : Tuple = self.text_seq_length + self.image_seq_length
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
UpperCAmelCase__ : int = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
UpperCAmelCase__ : str = bbox[i, j, 3]
UpperCAmelCase__ : Dict = bbox[i, j, 1]
UpperCAmelCase__ : str = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
UpperCAmelCase__ : Optional[int] = bbox[i, j, 2]
UpperCAmelCase__ : Any = bbox[i, j, 0]
UpperCAmelCase__ : List[Any] = tmp_coordinate
UpperCAmelCase__ : str = tf.constant(_A )
UpperCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : Any = None
if self.use_input_mask:
UpperCAmelCase__ : Any = random_attention_mask([self.batch_size, self.text_seq_length] )
UpperCAmelCase__ : Any = None
if self.use_token_type_ids:
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
UpperCAmelCase__ : Optional[int] = None
UpperCAmelCase__ : List[str] = None
if self.use_labels:
UpperCAmelCase__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
UpperCAmelCase__ : Optional[int] = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def lowercase_ ( self : Union[str, Any] , _A : int , _A : str , _A : Optional[int] , _A : Optional[int] , _A : List[str] , _A : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = TFLayoutLMvaModel(config=_A )
# text + image
UpperCAmelCase__ : Tuple = model(_A , pixel_values=_A , training=_A )
UpperCAmelCase__ : Tuple = model(
_A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , training=_A , )
UpperCAmelCase__ : Optional[Any] = model(_A , bbox=_A , pixel_values=_A , training=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
UpperCAmelCase__ : Any = model(_A , training=_A )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
UpperCAmelCase__ : str = model({'''pixel_values''': pixel_values} , training=_A )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def lowercase_ ( self : Union[str, Any] , _A : Optional[int] , _A : Optional[Any] , _A : Dict , _A : List[Any] , _A : List[Any] , _A : Any , _A : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.num_labels
UpperCAmelCase__ : int = TFLayoutLMvaForSequenceClassification(config=_A )
UpperCAmelCase__ : Union[str, Any] = model(
_A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , labels=_A , training=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ ( self : Dict , _A : List[Any] , _A : Any , _A : Dict , _A : str , _A : Optional[int] , _A : str , _A : str ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.num_labels
UpperCAmelCase__ : Union[str, Any] = TFLayoutLMvaForTokenClassification(config=_A )
UpperCAmelCase__ : Optional[int] = model(
_A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , labels=_A , training=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def lowercase_ ( self : Dict , _A : Dict , _A : List[str] , _A : Union[str, Any] , _A : int , _A : Tuple , _A : Dict , _A : str ):
'''simple docstring'''
UpperCAmelCase__ : str = 2
UpperCAmelCase__ : Dict = TFLayoutLMvaForQuestionAnswering(config=_A )
UpperCAmelCase__ : str = model(
_A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , training=_A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMvaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = TFLayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Optional[Any] = model_class(_A )
if getattr(_A , '''hf_compute_loss''' , _A ):
# The number of elements in the loss should be the same as the number of elements in the label
UpperCAmelCase__ : Tuple = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
UpperCAmelCase__ : List[Any] = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=_A )[0]
]
UpperCAmelCase__ : Optional[Any] = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
UpperCAmelCase__ : Any = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
UpperCAmelCase__ : Tuple = prepared_for_class.pop('''input_ids''' )
UpperCAmelCase__ : List[Any] = model(_A , **_A )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
UpperCAmelCase__ : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
UpperCAmelCase__ : Tuple = prepared_for_class.pop('''input_ids''' )
if "labels" in prepared_for_class:
UpperCAmelCase__ : Optional[Any] = prepared_for_class['''labels'''].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
UpperCAmelCase__ : Any = -100
UpperCAmelCase__ : Union[str, Any] = tf.convert_to_tensor(_A )
UpperCAmelCase__ : int = model(_A , **_A )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
UpperCAmelCase__ : Optional[int] = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
UpperCAmelCase__ : Dict = model(_A )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
UpperCAmelCase__ : Dict = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
# Get keys that were added with the _prepare_for_class function
UpperCAmelCase__ : Optional[int] = prepared_for_class.keys() - inputs_dict.keys()
UpperCAmelCase__ : int = inspect.signature(model.call ).parameters
UpperCAmelCase__ : Union[str, Any] = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
UpperCAmelCase__ : Dict = {0: '''input_ids'''}
for label_key in label_keys:
UpperCAmelCase__ : str = signature_names.index(_A )
UpperCAmelCase__ : List[Any] = label_key
UpperCAmelCase__ : Dict = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
UpperCAmelCase__ : Tuple = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
UpperCAmelCase__ : Any = prepared_for_class[value]
UpperCAmelCase__ : Tuple = tuple(_A )
# Send to model
UpperCAmelCase__ : Optional[Any] = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
    def test_model(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_model_various_embeddings(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_for_sequence_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
    def test_for_token_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
        )
    def test_for_question_answering(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
class TFLayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf").pixel_values

        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
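# Editor's hedged sketch: the integration test above in plain-script form. The model
# id "microsoft/layoutlmv3-base" comes from the test itself; everything else mirrors it.
#
#     processor = LayoutLMvaImageProcessor(apply_ocr=False)
#     pixel_values = processor(images=prepare_img(), return_tensors="tf").pixel_values
#     model = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base")
#     outputs = model(
#         input_ids=tf.constant([[1, 2]]),
#         bbox=tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0),
#         pixel_values=pixel_values,
#     )  # outputs.last_hidden_state has shape (1, 199, 768)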
| 75 | 1 |
"""LLaMA model configuration."""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
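# Editor's hedged sketch: exercising the rope_scaling validation defined above.
# The factor value is illustrative, not a released-checkpoint default.
#
#     config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})  # passes
#     config = LlamaConfig(rope_scaling={"type": "static", "factor": 2.0})  # raises ValueError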
| 75 |
"""Open the top Google search result for a query in the default web browser."""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
    print("Googling.....")
    url = f"https://www.google.com/search?q={query}&num=100"
    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )
    try:
        # Organic results are wrapped in a div with class "yuRUbf".
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        # Fallback layout: links live in "kCrYT" divs as /url?q=... redirects.
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]
    webbrowser.open(link)
| 75 | 1 |
"""Explicit (forward) Euler method for solving an ordinary differential equation."""
from collections.abc import Callable

import numpy as np


def explicit_euler(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Solve y' = ode_func(x, y) with initial value y(x0) = y0 on [x0, x_end]."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
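# Editor's hedged usage sketch for explicit_euler above (the step size 0.01 is an
# illustrative assumption). Approximates y' = y, y(0) = 1 on [0, 1]; the exact answer
# is e ≈ 2.71828 and forward Euler with this step lands near 2.7048.
#
#     y = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
#     print(y[-1])  # ≈ 2.7048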
| 75 |
"""Rotate an image by mapping three reference points with an affine transform."""
from pathlib import Path

import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Warp `img` with the affine map sending the three points `pt1` onto `pt2`."""
    matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape
    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]
    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
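# Editor's hedged sketch: cv2.getAffineTransform maps three source points onto three
# destination points exactly, so mapping a triangle onto itself yields the identity:
#
#     pts = np.array([[0, 0], [100, 0], [0, 100]], np.float32)
#     print(cv2.getAffineTransform(pts, pts))  # [[1. 0. 0.] [0. 1. 0.]]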
| 75 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            pad_token_id=1,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)

        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]

        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
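# Editor's hedged sketch: the checkpoint exercised by the integration tests above,
# used outside the unittest harness.
#
#     model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
#     logits = model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]  # shape (1, 6, 33)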
| 75 |
"""Close or flag GitHub issues that have gone stale on huggingface/transformers."""
from datetime import datetime as dt
import os

from github import Github


LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "feature request",
    "new model",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
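# Hedged usage note (editor's addition): the script expects a GITHUB_TOKEN environment
# variable with repo scope. The filename below is an assumption, not a value from the
# original file:
#
#     $ GITHUB_TOKEN=<token> python stale.py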
| 75 | 1 |
'''simple docstring'''
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None

log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING

_tqdm_active = True


def _get_default_logging_level():
    """
    If the TRANSFORMERS_VERBOSITY env var is set to one of the valid choices, return that as the default logging level.
    """
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush

        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


def _reset_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if not _default_handler:
            return

        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None
def get_log_levels_dict():
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name."""
    if name is None:
        name = _get_library_name()

    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity() -> int:
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_default_handler() -> None:
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler() -> None:
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()

    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()

    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)


def disable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format() -> None:
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)


def reset_format() -> None:
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        handler.setFormatter(None)
def warning_advice(self, *args, **kwargs):
    """Same as logger.warning(), but skipped if TRANSFORMERS_NO_ADVISORY_WARNINGS is set."""
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    """Same as logger.warning(), but emits a given warning message only once."""
    self.warning(*args, **kwargs)


logging.Logger.warning_once = warning_once
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return an empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    """Return a boolean indicating whether tqdm progress bars are enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bars() -> None:
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bars() -> None:
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
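# Editor's hedged sketch: typical use of the module above from library code.
#
#     from transformers.utils import logging
#     logging.set_verbosity_info()
#     logger = logging.get_logger(__name__)
#     logger.info("INFO")
#     logger.warning_advice("silenced when TRANSFORMERS_NO_ADVISORY_WARNINGS is set")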
| 75 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCamelCase_ ( __a ):
def __init__( self : Dict , _A : List[str] , _A : int ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=_A , scheduler=_A )
@torch.no_grad()
def __call__( self : List[Any] , _A : int = 1 , _A : int = 100 , _A : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _A : Optional[float] = None , _A : bool = True , ):
'''simple docstring'''
if audio_length_in_s is None:
UpperCAmelCase__ : List[str] = self.unet.config.sample_size / self.unet.config.sample_rate
UpperCAmelCase__ : Union[str, Any] = audio_length_in_s * self.unet.config.sample_rate
UpperCAmelCase__ : List[Any] = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
f"""{audio_length_in_s} is too small. Make sure it's bigger or equal to"""
f""" {3 * down_scale_factor / self.unet.config.sample_rate}.""" )
UpperCAmelCase__ : List[Any] = int(_A )
if sample_size % down_scale_factor != 0:
UpperCAmelCase__ : int = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"""
f""" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"""
''' process.''' )
UpperCAmelCase__ : Dict = int(_A )
UpperCAmelCase__ : Optional[Any] = next(iter(self.unet.parameters() ) ).dtype
UpperCAmelCase__ : int = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(_A , _A ) and len(_A ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(_A )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
UpperCAmelCase__ : Optional[int] = randn_tensor(_A , generator=_A , device=self.device , dtype=_A )
# set step values
self.scheduler.set_timesteps(_A , device=audio.device )
UpperCAmelCase__ : List[str] = self.scheduler.timesteps.to(_A )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
UpperCAmelCase__ : Optional[int] = self.unet(_A , _A ).sample
# 2. compute previous audio sample: x_t -> x_t-1
UpperCAmelCase__ : List[Any] = self.scheduler.step(_A , _A , _A ).prev_sample
UpperCAmelCase__ : Any = audio.clamp(-1 , 1 ).float().cpu().numpy()
UpperCAmelCase__ : Any = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=_A )
| 75 | 1 |
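A hedged usage sketch for the unconditional audio pipeline above. The checkpoint id is an assumption for illustration; any DanceDiffusion-style checkpoint with a 1-D UNet and a matching scheduler should behave the same.
import torch
from diffusers import DiffusionPipeline

# "harmonai/maestro-150k" is an assumed example checkpoint, not mandated by the code above.
pipe = DiffusionPipeline.from_pretrained("harmonai/maestro-150k")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
# The requested length is rounded up to a multiple of the UNet's downsampling
# factor, denoised, then trimmed back to the original number of samples.
audio = pipe(audio_length_in_s=4.5, num_inference_steps=100).audios[0]
print(audio.shape)  # roughly (channels, sample_rate * 4.5), as a float numpy array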
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def a__ ( lowerCAmelCase__ ) -> Tuple:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = image.size
UpperCAmelCase__ , UpperCAmelCase__ : Any = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
UpperCAmelCase__ : int = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] )
UpperCAmelCase__ : int = np.array(lowerCAmelCase__ ).astype(np.floataa ) / 2_5_5.0
UpperCAmelCase__ : Optional[Any] = image[None].transpose(0 , 3 , 1 , 2 )
UpperCAmelCase__ : Any = torch.from_numpy(lowerCAmelCase__ )
return 2.0 * image - 1.0
class lowerCamelCase_ ( __a ):
def __init__( self : str , _A : VQModel , _A : UNetaDModel , _A : Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
] , ):
'''simple docstring'''
super().__init__()
self.register_modules(vqvae=_A , unet=_A , scheduler=_A )
@torch.no_grad()
def __call__( self : Optional[int] , _A : Union[torch.Tensor, PIL.Image.Image] = None , _A : Optional[int] = 1 , _A : Optional[int] = 100 , _A : Optional[float] = 0.0 , _A : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _A : Optional[str] = "pil" , _A : bool = True , ):
'''simple docstring'''
if isinstance(_A , PIL.Image.Image ):
UpperCAmelCase__ : Tuple = 1
elif isinstance(_A , torch.Tensor ):
UpperCAmelCase__ : Tuple = image.shape[0]
else:
raise ValueError(f"""`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(_A )}""" )
if isinstance(_A , PIL.Image.Image ):
UpperCAmelCase__ : str = preprocess(_A )
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
UpperCAmelCase__ : Optional[Any] = (batch_size, self.unet.config.in_channels // 2, height, width)
UpperCAmelCase__ : List[str] = next(self.unet.parameters() ).dtype
UpperCAmelCase__ : List[Any] = randn_tensor(_A , generator=_A , device=self.device , dtype=_A )
UpperCAmelCase__ : Dict = image.to(device=self.device , dtype=_A )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(_A , device=self.device )
UpperCAmelCase__ : List[str] = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
UpperCAmelCase__ : int = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCAmelCase__ : int = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCAmelCase__ : Dict = {}
if accepts_eta:
UpperCAmelCase__ : Union[str, Any] = eta
for t in self.progress_bar(_A ):
# concat latents and low resolution image in the channel dimension.
UpperCAmelCase__ : Optional[int] = torch.cat([latents, image] , dim=1 )
UpperCAmelCase__ : Optional[Any] = self.scheduler.scale_model_input(_A , _A )
# predict the noise residual
UpperCAmelCase__ : Optional[int] = self.unet(_A , _A ).sample
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase__ : str = self.scheduler.step(_A , _A , _A , **_A ).prev_sample
# decode the image latents with the VQVAE
UpperCAmelCase__ : Optional[Any] = self.vqvae.decode(_A ).sample
UpperCAmelCase__ : Any = torch.clamp(_A , -1.0 , 1.0 )
UpperCAmelCase__ : List[Any] = image / 2 + 0.5
UpperCAmelCase__ : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCAmelCase__ : Tuple = self.numpy_to_pil(_A )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_A )
| 75 |
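A usage sketch for the latent super-resolution pipeline above. The checkpoint id and image URL are placeholders/assumptions; the key detail, visible in the denoising loop, is that the low-resolution image is concatenated with the latents channel-wise at every step.
import requests
import torch
from PIL import Image
from diffusers import LDMSuperResolutionPipeline

# Assumed checkpoint id; the class matches the structure of the snippet above.
pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
url = "https://example.com/low_res.png"  # placeholder URL
low_res = Image.open(requests.get(url, stream=True).raw).convert("RGB").resize((128, 128))
upscaled = pipe(low_res, num_inference_steps=100, eta=1.0).images[0]
upscaled.save("upscaled.png")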
'''simple docstring'''
from math import factorial
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> float:
if successes > trials:
raise ValueError('''successes must be lower than or equal to trials''' )
if trials < 0 or successes < 0:
raise ValueError('''the function is defined for non-negative integers''' )
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
raise ValueError('''the function is defined for non-negative integers''' )
if not 0 < prob < 1:
raise ValueError('''prob has to be strictly between 0 and 1''' )
UpperCAmelCase__ : Any = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
UpperCAmelCase__ : Any = float(factorial(lowerCAmelCase__ ) )
coefficient /= factorial(lowerCAmelCase__ ) * factorial(trials - successes )
return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print('''Probability of 2 successes out of 4 trials''')
print('''with probability of 0.75 is:''', end=''' ''')
print(binomial_distribution(2, 4, 0.75))
| 75 | 1 |
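A quick cross-check of the implementation above against the closed form P(X = k) = C(n, k) * p**k * (1 - p)**(n - k), using the standard library's math.comb; the function name follows the __main__ call.
from math import comb

n, k, p = 4, 2, 0.75
expected = comb(n, k) * p**k * (1 - p) ** (n - k)  # 6 * 0.5625 * 0.0625 = 0.2109375
assert abs(binomial_distribution(k, n, p) - expected) < 1e-12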
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class lowerCamelCase_ ( __a ):
def __init__( self : List[str] , _A : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = data
def __iter__( self : List[Any] ):
'''simple docstring'''
for element in self.data:
yield element
def a__ ( lowerCAmelCase__=True ) -> str:
UpperCAmelCase__ : Optional[int] = Accelerator(even_batches=lowerCAmelCase__ )
assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
return accelerator
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = False ) -> Optional[int]:
if iterable:
UpperCAmelCase__ : int = DummyIterableDataset(torch.as_tensor(range(lowerCAmelCase__ ) ) )
else:
UpperCAmelCase__ : str = TensorDataset(torch.as_tensor(range(lowerCAmelCase__ ) ) )
UpperCAmelCase__ : int = DataLoader(lowerCAmelCase__ , batch_size=lowerCAmelCase__ )
UpperCAmelCase__ : Optional[Any] = accelerator.prepare(lowerCAmelCase__ )
return dl
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ) -> Union[str, Any]:
UpperCAmelCase__ : List[str] = create_dataloader(accelerator=lowerCAmelCase__ , dataset_size=lowerCAmelCase__ , batch_size=lowerCAmelCase__ )
UpperCAmelCase__ : str = [len(batch[0] ) for batch in dl]
if accelerator.process_index == 0:
assert batch_sizes == process_0_expected_batch_sizes
elif accelerator.process_index == 1:
assert batch_sizes == process_1_expected_batch_sizes
def a__ ( ) -> Tuple:
UpperCAmelCase__ : Tuple = create_accelerator()
# without padding, we would expect a different number of batches
verify_dataloader_batch_sizes(
lowerCAmelCase__ , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
# without padding, we would expect the same number of batches, but different sizes
verify_dataloader_batch_sizes(
lowerCAmelCase__ , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def a__ ( ) -> List[Any]:
UpperCAmelCase__ : Any = create_accelerator(even_batches=lowerCAmelCase__ )
verify_dataloader_batch_sizes(
lowerCAmelCase__ , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
verify_dataloader_batch_sizes(
lowerCAmelCase__ , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def a__ ( ) -> Optional[Any]:
UpperCAmelCase__ : List[str] = create_accelerator(even_batches=lowerCAmelCase__ )
UpperCAmelCase__ : str = torch.nn.Linear(1 , 1 )
UpperCAmelCase__ : Union[str, Any] = accelerator.prepare(lowerCAmelCase__ )
UpperCAmelCase__ : Tuple = create_dataloader(lowerCAmelCase__ , dataset_size=3 , batch_size=1 )
UpperCAmelCase__ : Optional[int] = []
with accelerator.join_uneven_inputs([ddp_model] ):
for batch_idx, batch in enumerate(lowerCAmelCase__ ):
UpperCAmelCase__ : Any = ddp_model(batch[0].float() )
UpperCAmelCase__ : Dict = output.sum()
loss.backward()
batch_idxs.append(lowerCAmelCase__ )
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
def a__ ( lowerCAmelCase__ ) -> Tuple:
with warnings.catch_warnings(record=lowerCAmelCase__ ) as w:
with accelerator.join_uneven_inputs([Mock()] ):
pass
assert issubclass(w[-1].category , lowerCAmelCase__ )
assert "only supported for multi-GPU" in str(w[-1].message )
def a__ ( ) -> Optional[int]:
UpperCAmelCase__ : Optional[Any] = True
UpperCAmelCase__ : int = False
UpperCAmelCase__ : Tuple = create_accelerator(even_batches=lowerCAmelCase__ )
UpperCAmelCase__ : List[Any] = torch.nn.Linear(1 , 1 )
UpperCAmelCase__ : List[Any] = accelerator.prepare(lowerCAmelCase__ )
UpperCAmelCase__ : Tuple = create_dataloader(lowerCAmelCase__ , dataset_size=3 , batch_size=1 )
UpperCAmelCase__ : Optional[int] = create_dataloader(lowerCAmelCase__ , dataset_size=3 , batch_size=1 )
with accelerator.join_uneven_inputs([ddp_model] , even_batches=lowerCAmelCase__ ):
UpperCAmelCase__ : Optional[int] = train_dl.batch_sampler.even_batches
UpperCAmelCase__ : Optional[Any] = valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def a__ ( ) -> str:
UpperCAmelCase__ : List[str] = True
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : str = create_accelerator(even_batches=lowerCAmelCase__ )
UpperCAmelCase__ : List[str] = torch.nn.Linear(1 , 1 )
UpperCAmelCase__ : str = accelerator.prepare(lowerCAmelCase__ )
create_dataloader(lowerCAmelCase__ , dataset_size=3 , batch_size=1 , iterable=lowerCAmelCase__ )
UpperCAmelCase__ : Tuple = create_dataloader(lowerCAmelCase__ , dataset_size=3 , batch_size=1 )
with warnings.catch_warnings():
warnings.filterwarnings('''ignore''' )
try:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=lowerCAmelCase__ ):
UpperCAmelCase__ : Union[str, Any] = batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def a__ ( ) -> int:
UpperCAmelCase__ : Union[str, Any] = create_accelerator()
UpperCAmelCase__ : List[str] = torch.nn.Linear(1 , 1 )
UpperCAmelCase__ : int = accelerator.prepare(lowerCAmelCase__ )
create_dataloader(lowerCAmelCase__ , dataset_size=3 , batch_size=1 , iterable=lowerCAmelCase__ )
with warnings.catch_warnings(record=lowerCAmelCase__ ) as w:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=lowerCAmelCase__ ):
pass
assert issubclass(w[-1].category , lowerCAmelCase__ )
assert "only supported for map-style datasets" in str(w[-1].message )
def a__ ( ) -> Optional[Any]:
UpperCAmelCase__ : Union[str, Any] = create_accelerator()
accelerator.print('''Test that even_batches variable ensures uniform batches across processes''' )
test_default_ensures_even_batch_sizes()
accelerator.print('''Run tests with even_batches disabled''' )
test_can_disable_even_batches()
accelerator.print('''Test joining uneven inputs''' )
test_can_join_uneven_inputs()
accelerator.print('''Test overriding even_batches when joining uneven inputs''' )
test_join_can_override_even_batches()
accelerator.print('''Test overriding even_batches for mixed dataloader types''' )
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print('''Test overriding even_batches raises a warning for iterable dataloaders''' )
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print('''Test join with non DDP distributed raises warning''' )
UpperCAmelCase__ : Tuple = accelerator.state.distributed_type
UpperCAmelCase__ : Dict = DistributedType.FSDP
test_join_raises_warning_for_non_ddp_distributed(lowerCAmelCase__ )
UpperCAmelCase__ : int = original_state
if __name__ == "__main__":
main()
| 75 |
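The tests above assume exactly two processes; below is a sketch of how they would be launched and of the behaviour under test, with the script name as a placeholder.
# Launch sketch:  accelerate launch --num_processes 2 test_even_batches.py
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator(even_batches=False)
dl = accelerator.prepare(DataLoader(TensorDataset(torch.arange(3)), batch_size=1))
# With even_batches=False, rank 0 sees two batches and rank 1 only one;
# with even_batches=True (the default) both ranks would see two.
print(accelerator.process_index, [len(batch[0]) for batch in dl])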
'''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCamelCase__ = logging.get_logger(__name__)
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = ['pixel_values']
def __init__( self : List[str] , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : bool = True , _A : Dict[str, int] = None , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , _A : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **_A : int , ):
'''simple docstring'''
super().__init__(**_A )
UpperCAmelCase__ : Dict = size if size is not None else {'''shortest_edge''': 224}
UpperCAmelCase__ : int = get_size_dict(_A , default_to_square=_A )
UpperCAmelCase__ : Union[str, Any] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
UpperCAmelCase__ : List[str] = get_size_dict(_A , param_name='''crop_size''' )
UpperCAmelCase__ : str = do_resize
UpperCAmelCase__ : List[Any] = size
UpperCAmelCase__ : int = resample
UpperCAmelCase__ : int = do_center_crop
UpperCAmelCase__ : List[str] = crop_size
UpperCAmelCase__ : Union[str, Any] = do_rescale
UpperCAmelCase__ : Optional[int] = rescale_factor
UpperCAmelCase__ : List[Any] = do_normalize
UpperCAmelCase__ : Dict = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
UpperCAmelCase__ : Dict = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def lowercase_ ( self : str , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = get_size_dict(_A , default_to_square=_A )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
UpperCAmelCase__ : Tuple = int((256 / 224) * size['''shortest_edge'''] )
UpperCAmelCase__ : Tuple = get_resize_output_image_size(_A , size=_A , default_to_square=_A )
UpperCAmelCase__ : Dict = {'''height''': output_size[0], '''width''': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" )
return resize(
_A , size=(size_dict['''height'''], size_dict['''width''']) , resample=_A , data_format=_A , **_A )
def lowercase_ ( self : Optional[Any] , _A : np.ndarray , _A : Dict[str, int] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Optional[int] , ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(_A , size=(size['''height'''], size['''width''']) , data_format=_A , **_A )
def lowercase_ ( self : List[str] , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Dict , ):
'''simple docstring'''
return rescale(_A , scale=_A , data_format=_A , **_A )
def lowercase_ ( self : Dict , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Optional[int] , ):
'''simple docstring'''
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def lowercase_ ( self : Optional[Any] , _A : ImageInput , _A : Optional[bool] = None , _A : Optional[Dict[str, int]] = None , _A : PILImageResampling = None , _A : Optional[bool] = None , _A : Optional[Dict[str, int]] = None , _A : Optional[bool] = None , _A : Optional[float] = None , _A : Optional[bool] = None , _A : Optional[Union[float, Iterable[float]]] = None , _A : Optional[Union[float, Iterable[float]]] = None , _A : Optional[TensorType] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : str , ):
'''simple docstring'''
UpperCAmelCase__ : str = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase__ : Optional[int] = resample if resample is not None else self.resample
UpperCAmelCase__ : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase__ : Tuple = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase__ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase__ : List[str] = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase__ : Tuple = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase__ : List[str] = image_std if image_std is not None else self.image_std
UpperCAmelCase__ : Tuple = size if size is not None else self.size
UpperCAmelCase__ : int = get_size_dict(_A , default_to_square=_A )
UpperCAmelCase__ : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase__ : int = get_size_dict(_A , param_name='''crop_size''' )
UpperCAmelCase__ : Union[str, Any] = make_list_of_images(_A )
if not valid_images(_A ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
UpperCAmelCase__ : int = [to_numpy_array(_A ) for image in images]
if do_resize:
UpperCAmelCase__ : str = [self.resize(_A , _A , _A ) for image in images]
if do_center_crop:
UpperCAmelCase__ : Tuple = [self.center_crop(_A , _A ) for image in images]
if do_rescale:
UpperCAmelCase__ : Optional[int] = [self.rescale(_A , _A ) for image in images]
if do_normalize:
UpperCAmelCase__ : Any = [self.normalize(_A , _A , _A ) for image in images]
UpperCAmelCase__ : Tuple = [to_channel_dimension_format(_A , _A ) for image in images]
UpperCAmelCase__ : Dict = {'''pixel_values''': images}
return BatchFeature(data=_A , tensor_type=_A )
| 75 | 1 |
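A usage sketch for the processor above. The concrete class name is an assumption (the 256/224 shortest-edge ratio matches LeViT-style preprocessing); the defaults resize the shortest edge, center-crop to 224x224, then rescale and normalize.
import numpy as np
from PIL import Image
from transformers import LevitImageProcessor  # assumed concrete class for the snippet above

processor = LevitImageProcessor()
image = Image.fromarray((np.random.rand(480, 640, 3) * 255).astype(np.uint8))
batch = processor(images=image, return_tensors="pt")
print(batch["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])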
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class lowerCamelCase_ ( __a ):
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_A , '''hidden_sizes''' ) )
self.parent.assertTrue(hasattr(_A , '''neck_hidden_sizes''' ) )
self.parent.assertTrue(hasattr(_A , '''num_attention_heads''' ) )
class lowerCamelCase_ :
def __init__( self : Optional[Any] , _A : Tuple , _A : List[Any]=13 , _A : Union[str, Any]=32 , _A : int=2 , _A : int=3 , _A : List[Any]=640 , _A : Optional[int]=4 , _A : List[str]="silu" , _A : Optional[int]=3 , _A : Optional[Any]=32 , _A : str=0.1 , _A : List[str]=0.1 , _A : List[Any]=0.1 , _A : Optional[int]=0.0_2 , _A : Optional[int]=True , _A : str=True , _A : Dict=10 , _A : Tuple=None , ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = parent
UpperCAmelCase__ : Dict = batch_size
UpperCAmelCase__ : Any = image_size
UpperCAmelCase__ : str = patch_size
UpperCAmelCase__ : List[str] = num_channels
UpperCAmelCase__ : Any = last_hidden_size
UpperCAmelCase__ : Dict = num_attention_heads
UpperCAmelCase__ : List[Any] = hidden_act
UpperCAmelCase__ : Any = conv_kernel_size
UpperCAmelCase__ : Optional[Any] = output_stride
UpperCAmelCase__ : Dict = hidden_dropout_prob
UpperCAmelCase__ : Dict = attention_probs_dropout_prob
UpperCAmelCase__ : Optional[int] = classifier_dropout_prob
UpperCAmelCase__ : int = use_labels
UpperCAmelCase__ : Any = is_training
UpperCAmelCase__ : Union[str, Any] = num_labels
UpperCAmelCase__ : Optional[Any] = initializer_range
UpperCAmelCase__ : Tuple = scope
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : List[Any] = None
UpperCAmelCase__ : Optional[Any] = None
if self.use_labels:
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCAmelCase__ : Tuple = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowercase_ ( self : int ):
'''simple docstring'''
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowercase_ ( self : List[str] , _A : int , _A : List[str] , _A : Optional[int] , _A : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = MobileViTModel(config=_A )
model.to(_A )
model.eval()
UpperCAmelCase__ : int = model(_A )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowercase_ ( self : List[Any] , _A : List[str] , _A : List[str] , _A : Dict , _A : str ):
'''simple docstring'''
UpperCAmelCase__ : str = self.num_labels
UpperCAmelCase__ : Optional[int] = MobileViTForImageClassification(_A )
model.to(_A )
model.eval()
UpperCAmelCase__ : List[Any] = model(_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ ( self : Dict , _A : Union[str, Any] , _A : Any , _A : Optional[int] , _A : Dict ):
'''simple docstring'''
UpperCAmelCase__ : int = self.num_labels
UpperCAmelCase__ : Any = MobileViTForSemanticSegmentation(_A )
model.to(_A )
model.eval()
UpperCAmelCase__ : List[str] = model(_A )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
UpperCAmelCase__ : Optional[Any] = model(_A , labels=_A )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Any = config_and_inputs
UpperCAmelCase__ : int = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase_ ( __a , __a , unittest.TestCase ):
lowerCAmelCase__ = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
{
'feature-extraction': MobileViTModel,
'image-classification': MobileViTForImageClassification,
'image-segmentation': MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = MobileViTModelTester(self )
UpperCAmelCase__ : Optional[int] = MobileViTConfigTester(self , config_class=_A , has_text_modality=_A )
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileViT does not use inputs_embeds''' )
def lowercase_ ( self : str ):
'''simple docstring'''
pass
@unittest.skip(reason='''MobileViT does not support input and output embeddings''' )
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason='''MobileViT does not output attentions''' )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
pass
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Optional[Any] = model_class(_A )
UpperCAmelCase__ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : Union[str, Any] = [*signature.parameters.keys()]
UpperCAmelCase__ : List[str] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _A )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
pass
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def lowercase_ ( self : str ):
'''simple docstring'''
def check_hidden_states_output(_A : Tuple , _A : Any , _A : List[Any] ):
UpperCAmelCase__ : Optional[int] = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
UpperCAmelCase__ : Any = model(**self._prepare_for_class(_A , _A ) )
UpperCAmelCase__ : List[Any] = outputs.hidden_states
UpperCAmelCase__ : Tuple = 5
self.assertEqual(len(_A ) , _A )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
UpperCAmelCase__ : Any = 2
for i in range(len(_A ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
UpperCAmelCase__ , UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Union[str, Any] = True
check_hidden_states_output(_A , _A , _A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ : Dict = True
check_hidden_states_output(_A , _A , _A )
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_A )
@slow
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : str = MobileViTModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def a__ ( ) -> int:
UpperCAmelCase__ : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
@cached_property
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
return MobileViTImageProcessor.from_pretrained('''apple/mobilevit-xx-small''' ) if is_vision_available() else None
@slow
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = MobileViTForImageClassification.from_pretrained('''apple/mobilevit-xx-small''' ).to(_A )
UpperCAmelCase__ : int = self.default_image_processor
UpperCAmelCase__ : Dict = prepare_img()
UpperCAmelCase__ : Optional[Any] = image_processor(images=_A , return_tensors='''pt''' ).to(_A )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : Optional[Any] = model(**_A )
# verify the logits
UpperCAmelCase__ : Any = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , _A )
UpperCAmelCase__ : Optional[int] = torch.tensor([-1.9_3_6_4, -1.2_3_2_7, -0.4_6_5_3] ).to(_A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _A , atol=1e-4 ) )
@slow
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : int = MobileViTForSemanticSegmentation.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
UpperCAmelCase__ : Optional[Any] = model.to(_A )
UpperCAmelCase__ : Optional[int] = MobileViTImageProcessor.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
UpperCAmelCase__ : int = prepare_img()
UpperCAmelCase__ : Dict = image_processor(images=_A , return_tensors='''pt''' ).to(_A )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : Any = model(**_A )
UpperCAmelCase__ : List[Any] = outputs.logits
# verify the logits
UpperCAmelCase__ : Optional[int] = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , _A )
UpperCAmelCase__ : List[Any] = torch.tensor(
[
[[6.9_7_1_3, 6.9_7_8_6, 7.2_4_2_2], [7.2_8_9_3, 7.2_8_2_5, 7.4_4_4_6], [7.6_5_8_0, 7.8_7_9_7, 7.9_4_2_0]],
[[-1_0.6_8_6_9, -1_0.3_2_5_0, -1_0.3_4_7_1], [-1_0.4_2_2_8, -9.9_8_6_8, -9.7_1_3_2], [-1_1.0_4_0_5, -1_1.0_2_2_1, -1_0.7_3_1_8]],
[[-3.3_0_8_9, -2.8_5_3_9, -2.6_7_4_0], [-3.2_7_0_6, -2.5_6_2_1, -2.5_1_0_8], [-3.2_5_3_4, -2.6_6_1_5, -2.6_6_5_1]],
] , device=_A , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _A , atol=1e-4 ) )
@slow
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = MobileViTForSemanticSegmentation.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
UpperCAmelCase__ : str = model.to(_A )
UpperCAmelCase__ : Tuple = MobileViTImageProcessor.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
UpperCAmelCase__ : str = prepare_img()
UpperCAmelCase__ : Tuple = image_processor(images=_A , return_tensors='''pt''' ).to(_A )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : Any = model(**_A )
UpperCAmelCase__ : Tuple = outputs.logits.detach().cpu()
UpperCAmelCase__ : Dict = image_processor.post_process_semantic_segmentation(outputs=_A , target_sizes=[(50, 60)] )
UpperCAmelCase__ : str = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , _A )
UpperCAmelCase__ : int = image_processor.post_process_semantic_segmentation(outputs=_A )
UpperCAmelCase__ : str = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , _A )
| 75 |
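A compact inference sketch condensing what the slow tests above verify end to end:
import torch
from PIL import Image
from transformers import MobileViTForImageClassification, MobileViTImageProcessor

processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small")
model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")  # same fixture as the tests
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1000)
print(model.config.id2label[int(logits.argmax(-1))])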
'''simple docstring'''
import math
def a__ ( ) -> None:
UpperCAmelCase__ : List[str] = input('''Enter message: ''' )
UpperCAmelCase__ : Any = int(input(F"""Enter key [2-{len(lowerCAmelCase__ ) - 1}]: """ ) )
UpperCAmelCase__ : List[str] = input('''Encryption/Decryption [e/d]: ''' )
if mode.lower().startswith('''e''' ):
UpperCAmelCase__ : Dict = encrypt_message(lowerCAmelCase__ , lowerCAmelCase__ )
elif mode.lower().startswith('''d''' ):
UpperCAmelCase__ : Optional[int] = decrypt_message(lowerCAmelCase__ , lowerCAmelCase__ )
# Append pipe symbol (vertical bar) to identify spaces at the end.
print(F"""Output:\n{text + "|"}""" )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
UpperCAmelCase__ : Optional[int] = [''''''] * key
for col in range(lowerCAmelCase__ ):
UpperCAmelCase__ : Tuple = col
while pointer < len(lowerCAmelCase__ ):
cipher_text[col] += message[pointer]
pointer += key
return "".join(lowerCAmelCase__ )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
UpperCAmelCase__ : int = math.ceil(len(lowerCAmelCase__ ) / key )
UpperCAmelCase__ : Any = key
UpperCAmelCase__ : Optional[int] = (num_cols * num_rows) - len(lowerCAmelCase__ )
UpperCAmelCase__ : List[Any] = [''''''] * num_cols
UpperCAmelCase__ : List[str] = 0
UpperCAmelCase__ : List[Any] = 0
for symbol in message:
plain_text[col] += symbol
col += 1
if (
(col == num_cols)
or (col == num_cols - 1)
and (row >= num_rows - num_shaded_boxes)
):
UpperCAmelCase__ : Optional[int] = 0
row += 1
return "".join(lowerCAmelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 75 | 1 |
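A round-trip sanity check for the columnar transposition above; the function names follow the calls in main(), the (key, message) argument order is assumed from the conventional implementation, and decrypting with the same key must recover the plaintext exactly.
message, key = "Common sense is not so common.", 8
cipher = encrypt_message(key, message)
assert decrypt_message(key, cipher) == message
print(cipher)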
'''simple docstring'''
import fire
from utils import calculate_rouge, save_json
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> Tuple:
UpperCAmelCase__ : List[Any] = [x.strip() for x in open(lowerCAmelCase__ ).readlines()]
UpperCAmelCase__ : Tuple = [x.strip() for x in open(lowerCAmelCase__ ).readlines()][: len(lowerCAmelCase__ )]
UpperCAmelCase__ : Optional[int] = calculate_rouge(lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ )
if save_path is not None:
save_json(lowerCAmelCase__ , lowerCAmelCase__ , indent=lowerCAmelCase__ )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 75 |
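Since fire.Fire exposes the function as a CLI, it can be driven from the shell or called directly; the file names below are placeholders.
# Shell sketch:  python calculate_rouge_path.py preds.txt refs.txt --save_path metrics.json
metrics = calculate_rouge_path("preds.txt", "refs.txt", save_path="metrics.json")
print(metrics)  # a dict of ROUGE scores, also written to metrics.json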
'''simple docstring'''
class lowerCamelCase_ :
def __init__( self : str , _A : Union[str, Any] , _A : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = name
UpperCAmelCase__ : Union[str, Any] = val
def __str__( self : Tuple ):
'''simple docstring'''
return f"""{self.__class__.__name__}({self.name}, {self.val})"""
def __lt__( self : Union[str, Any] , _A : Dict ):
'''simple docstring'''
return self.val < other.val
class lowerCamelCase_ :
def __init__( self : int , _A : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = {}
UpperCAmelCase__ : int = {}
UpperCAmelCase__ : Any = self.build_heap(_A )
def __getitem__( self : Any , _A : Any ):
'''simple docstring'''
return self.get_value(_A )
def lowercase_ ( self : Any , _A : List[Any] ):
'''simple docstring'''
return (idx - 1) // 2
def lowercase_ ( self : Union[str, Any] , _A : Optional[int] ):
'''simple docstring'''
return idx * 2 + 1
def lowercase_ ( self : Tuple , _A : List[Any] ):
'''simple docstring'''
return idx * 2 + 2
def lowercase_ ( self : List[str] , _A : Tuple ):
'''simple docstring'''
return self.heap_dict[key]
def lowercase_ ( self : str , _A : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Any = len(_A ) - 1
UpperCAmelCase__ : Tuple = self.get_parent_idx(_A )
for idx, i in enumerate(_A ):
UpperCAmelCase__ : Dict = idx
UpperCAmelCase__ : Optional[Any] = i.val
for i in range(_A , -1 , -1 ):
self.sift_down(_A , _A )
return array
def lowercase_ ( self : Optional[Any] , _A : str , _A : List[Any] ):
'''simple docstring'''
while True:
UpperCAmelCase__ : Any = self.get_left_child_idx(_A ) # noqa: E741
UpperCAmelCase__ : Optional[Any] = self.get_right_child_idx(_A )
UpperCAmelCase__ : Tuple = idx
if l < len(_A ) and array[l] < array[idx]:
UpperCAmelCase__ : int = l
if r < len(_A ) and array[r] < array[smallest]:
UpperCAmelCase__ : Dict = r
if smallest != idx:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = array[smallest], array[idx]
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = (
self.idx_of_element[array[smallest]],
self.idx_of_element[array[idx]],
)
UpperCAmelCase__ : str = smallest
else:
break
def lowercase_ ( self : List[str] , _A : int ):
'''simple docstring'''
UpperCAmelCase__ : str = self.get_parent_idx(_A )
while p >= 0 and self.heap[p] > self.heap[idx]:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.heap[idx], self.heap[p]
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = (
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
UpperCAmelCase__ : Union[str, Any] = p
UpperCAmelCase__ : List[Any] = self.get_parent_idx(_A )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
return self.heap[0]
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Any = self.heap[-1], self.heap[0]
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = (
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
UpperCAmelCase__ : int = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0 , self.heap )
return x
def lowercase_ ( self : int , _A : Union[str, Any] ):
'''simple docstring'''
self.heap.append(_A )
UpperCAmelCase__ : Union[str, Any] = len(self.heap ) - 1
UpperCAmelCase__ : Optional[Any] = node.val
self.sift_up(len(self.heap ) - 1 )
def lowercase_ ( self : str ):
'''simple docstring'''
return len(self.heap ) == 0
def lowercase_ ( self : int , _A : Optional[Any] , _A : str ):
'''simple docstring'''
assert (
self.heap[self.idx_of_element[node]].val > new_value
), "newValue must be less that current value"
UpperCAmelCase__ : Optional[Any] = new_value
UpperCAmelCase__ : List[str] = new_value
self.sift_up(self.idx_of_element[node] )
UpperCamelCase__ = Node('''R''', -1)
UpperCamelCase__ = Node('''B''', 6)
UpperCamelCase__ = Node('''A''', 3)
UpperCamelCase__ = Node('''X''', 1)
UpperCamelCase__ = Node('''E''', 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
UpperCamelCase__ = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('''Min Heap - before decrease key''')
for i in my_min_heap.heap:
print(i)
print('''Min Heap - After decrease key of node [B -> -17]''')
my_min_heap.decrease_key(b, -1_7)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 75 | 1 |
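A small behavioural check of decrease_key using only the names visible in the demo above: after lowering a node below the current minimum, it must surface at heap[0].
x = Node("X", 10)
h = MinHeap([Node("A", 3), x, Node("B", 6)])
h.decrease_key(x, -5)   # sift_up restores the heap property
assert h.heap[0] is x and h.heap[0].val == -5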
'''simple docstring'''
from __future__ import annotations
import math
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int:
if depth < 0:
raise ValueError('''Depth cannot be less than 0''' )
if not scores:
raise ValueError('''Scores cannot be empty''' )
if depth == height:
return scores[node_index]
return (
max(
minimax(depth + 1 , node_index * 2 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) , minimax(depth + 1 , node_index * 2 + 1 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) , )
if is_max
else min(
minimax(depth + 1 , node_index * 2 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) , minimax(depth + 1 , node_index * 2 + 1 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) , )
)
def a__ ( ) -> None:
UpperCAmelCase__ : str = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23]
UpperCAmelCase__ : Union[str, Any] = math.log(len(lowerCAmelCase__ ) , 2 )
print(F"""Optimal value : {minimax(0 , 0 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 75 |
'''simple docstring'''
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
UpperCamelCase__ = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
UpperCamelCase__ = direct_transformers_import(PATH_TO_TRANSFORMERS)
UpperCamelCase__ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
UpperCamelCase__ = re.compile(R'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''')
UpperCamelCase__ = {
'''DecisionTransformerConfig''',
'''EncoderDecoderConfig''',
'''MusicgenConfig''',
'''RagConfig''',
'''SpeechEncoderDecoderConfig''',
'''TimmBackboneConfig''',
'''VisionEncoderDecoderConfig''',
'''VisionTextDualEncoderConfig''',
'''LlamaConfig''',
}
def a__ ( lowerCAmelCase__ ) -> List[str]:
UpperCAmelCase__ : str = None
# source code of `config_class`
UpperCAmelCase__ : str = inspect.getsource(lowerCAmelCase__ )
UpperCAmelCase__ : List[Any] = _re_checkpoint.findall(lowerCAmelCase__ )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith('''/''' ):
UpperCAmelCase__ : List[str] = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
UpperCAmelCase__ : Union[str, Any] = F"""https://huggingface.co/{ckpt_name}"""
if ckpt_link == ckpt_link_from_name:
UpperCAmelCase__ : Any = ckpt_name
break
return checkpoint
def a__ ( ) -> Dict:
UpperCAmelCase__ : Optional[Any] = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
UpperCAmelCase__ : Any = get_checkpoint_from_config_class(lowerCAmelCase__ )
UpperCAmelCase__ : Optional[int] = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > 0:
UpperCAmelCase__ : List[str] = '''\n'''.join(sorted(lowerCAmelCase__ ) )
raise ValueError(F"""The following configurations don't contain any valid checkpoint:\n{message}""" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 75 | 1 |
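The checkpoint regex above, applied to a typical config docstring fragment, yields (name, link) pairs that the verification loop then matches against https://huggingface.co/{name}:
import re

_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
doc = "Instantiating a configuration with the defaults of [bert-base-uncased](https://huggingface.co/bert-base-uncased)."
print(_re_checkpoint.findall(doc))
# [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]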
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase__ = {
'''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
'''tokenization_electra''': ['''ElectraTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ['''ElectraTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ElectraForCausalLM''',
'''ElectraForMaskedLM''',
'''ElectraForMultipleChoice''',
'''ElectraForPreTraining''',
'''ElectraForQuestionAnswering''',
'''ElectraForSequenceClassification''',
'''ElectraForTokenClassification''',
'''ElectraModel''',
'''ElectraPreTrainedModel''',
'''load_tf_weights_in_electra''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFElectraForMaskedLM''',
'''TFElectraForMultipleChoice''',
'''TFElectraForPreTraining''',
'''TFElectraForQuestionAnswering''',
'''TFElectraForSequenceClassification''',
'''TFElectraForTokenClassification''',
'''TFElectraModel''',
'''TFElectraPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'''FlaxElectraForCausalLM''',
'''FlaxElectraForMaskedLM''',
'''FlaxElectraForMultipleChoice''',
'''FlaxElectraForPreTraining''',
'''FlaxElectraForQuestionAnswering''',
'''FlaxElectraForSequenceClassification''',
'''FlaxElectraForTokenClassification''',
'''FlaxElectraModel''',
'''FlaxElectraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 75 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCamelCase_ ( metaclass=__a ):
lowerCAmelCase__ = ['torch', 'torchsde']
def __init__( self : Tuple , *_A : Any , **_A : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ['''torch''', '''torchsde'''] )
@classmethod
def lowercase_ ( cls : List[Any] , *_A : Tuple , **_A : Tuple ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''torchsde'''] )
@classmethod
def lowercase_ ( cls : List[str] , *_A : Optional[int] , **_A : Any ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''torchsde'''] )
| 75 | 1 |
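Behaviour sketch for the dummy above: importing succeeds even without the backends, but any instantiation or classmethod call raises via requires_backends. The concrete class name below is an assumption.
try:
    scheduler = DPMSolverSDEScheduler()  # assumed name of the torchsde-gated class
except ImportError as err:
    print(err)  # message explains that torch and torchsde need to be installed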
'''simple docstring'''
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
UpperCamelCase__ = logging.get_logger(__name__)
class lowerCamelCase_ ( enum.Enum ):
lowerCAmelCase__ = 0
lowerCAmelCase__ = 1
@add_end_docstrings(__a )
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = 'generated'
def __init__( self : List[str] , *_A : Optional[Any] , **_A : List[str] ):
'''simple docstring'''
super().__init__(*_A , **_A )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def lowercase_ ( self : Any , _A : List[Any]=None , _A : List[Any]=None , _A : int=None , _A : Any=None , _A : Union[str, Any]=None , _A : str=None , **_A : List[str] , ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = {}
if truncation is not None:
UpperCAmelCase__ : Union[str, Any] = truncation
UpperCAmelCase__ : Any = generate_kwargs
UpperCAmelCase__ : Optional[Any] = {}
if return_tensors is not None and return_type is None:
UpperCAmelCase__ : Any = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
UpperCAmelCase__ : Tuple = return_type
if clean_up_tokenization_spaces is not None:
UpperCAmelCase__ : List[str] = clean_up_tokenization_spaces
if stop_sequence is not None:
UpperCAmelCase__ : Any = self.tokenizer.encode(_A , add_special_tokens=_A )
if len(_A ) > 1:
warnings.warn(
'''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
''' the stop sequence will be used as the stop sequence string in the interim.''' )
UpperCAmelCase__ : Union[str, Any] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def lowercase_ ( self : List[str] , _A : int , _A : int , _A : int ):
'''simple docstring'''
return True
def lowercase_ ( self : List[str] , *_A : List[Any] , _A : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.model.config.prefix if self.model.config.prefix is not None else ''''''
if isinstance(args[0] , _A ):
if self.tokenizer.pad_token_id is None:
raise ValueError('''Please make sure that the tokenizer has a pad_token_id when using a batch input''' )
UpperCAmelCase__ : Tuple = ([prefix + arg for arg in args[0]],)
UpperCAmelCase__ : Dict = True
elif isinstance(args[0] , _A ):
UpperCAmelCase__ : List[str] = (prefix + args[0],)
UpperCAmelCase__ : Dict = False
else:
raise ValueError(
f""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""" )
UpperCAmelCase__ : List[str] = self.tokenizer(*_A , padding=_A , truncation=_A , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self : int , *_A : int , **_A : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = super().__call__(*_A , **_A )
if (
isinstance(args[0] , _A )
and all(isinstance(_A , _A ) for el in args[0] )
and all(len(_A ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def lowercase_ ( self : Union[str, Any] , _A : List[Any] , _A : Union[str, Any]=TruncationStrategy.DO_NOT_TRUNCATE , **_A : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : str = self._parse_and_tokenize(_A , truncation=_A , **_A )
return inputs
def lowercase_ ( self : Tuple , _A : str , **_A : Any ):
'''simple docstring'''
if self.framework == "pt":
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = model_inputs['''input_ids'''].shape
elif self.framework == "tf":
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = tf.shape(model_inputs['''input_ids'''] ).numpy()
UpperCAmelCase__ : str = generate_kwargs.get('''min_length''' , self.model.config.min_length )
UpperCAmelCase__ : Optional[Any] = generate_kwargs.get('''max_length''' , self.model.config.max_length )
self.check_inputs(_A , generate_kwargs['''min_length'''] , generate_kwargs['''max_length'''] )
UpperCAmelCase__ : int = self.model.generate(**_A , **_A )
UpperCAmelCase__ : List[Any] = output_ids.shape[0]
if self.framework == "pt":
UpperCAmelCase__ : str = output_ids.reshape(_A , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
UpperCAmelCase__ : Union[str, Any] = tf.reshape(_A , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def lowercase_ ( self : Union[str, Any] , _A : Any , _A : Any=ReturnType.TEXT , _A : Optional[Any]=False ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
UpperCAmelCase__ : Any = {f"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
UpperCAmelCase__ : List[str] = {
f"""{self.return_name}_text""": self.tokenizer.decode(
_A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , )
}
records.append(_A )
return records
@add_end_docstrings(__a )
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = 'summary'
def __call__( self : Tuple , *_A : Optional[int] , **_A : Optional[int] ):
'''simple docstring'''
return super().__call__(*_A , **_A )
def lowercase_ ( self : Optional[Any] , _A : int , _A : int , _A : int ):
'''simple docstring'''
if max_length < min_length:
logger.warning(f"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
'''a summarization task, where outputs shorter than the input are typically wanted, you might '''
f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(__a )
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = 'translation'
def lowercase_ ( self : Tuple , _A : int , _A : int , _A : int ):
'''simple docstring'''
if input_length > 0.9 * max_length:
logger.warning(
f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
'''increasing your max_length manually, e.g. translator(\'...\', max_length=400)''' )
return True
def lowercase_ ( self : List[Any] , *_A : Any , _A : Dict=TruncationStrategy.DO_NOT_TRUNCATE , _A : str=None , _A : Any=None ):
'''simple docstring'''
if getattr(self.tokenizer , '''_build_translation_inputs''' , _A ):
return self.tokenizer._build_translation_inputs(
*_A , return_tensors=self.framework , truncation=_A , src_lang=_A , tgt_lang=_A )
else:
return super()._parse_and_tokenize(*_A , truncation=_A )
def lowercase_ ( self : Union[str, Any] , _A : Optional[Any]=None , _A : Optional[int]=None , **_A : Dict ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Dict = super()._sanitize_parameters(**_A )
if src_lang is not None:
UpperCAmelCase__ : int = src_lang
if tgt_lang is not None:
UpperCAmelCase__ : Union[str, Any] = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
UpperCAmelCase__ : List[Any] = kwargs.get('''task''' , self.task )
UpperCAmelCase__ : int = task.split('''_''' )
if task and len(_A ) == 4:
# translation, XX, to YY
UpperCAmelCase__ : Any = items[1]
UpperCAmelCase__ : Optional[int] = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self : Union[str, Any] , *_A : int , **_A : Union[str, Any] ):
'''simple docstring'''
return super().__call__(*_A , **_A )
| 75 |
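The three task classes above back the usual pipeline() factories; the model ids below are illustrative assumptions, and the output keys follow the f"{self.return_name}_text" convention in the postprocessing step.
from transformers import pipeline

summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")
print(summarizer("A long article ...", min_length=10, max_length=60)[0]["summary_text"])
translator = pipeline("translation_en_to_fr", model="t5-small")
print(translator("How old are you?")[0]["translation_text"])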
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246_534,
        n_positions=256,
        n_embd=1_280,
        dff=8_192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
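# Usage sketch (assumes the `transformers` package is installed): the
# attribute_map above lets generic PretrainedConfig names resolve to the
# CTRL-specific attributes.
from transformers import CTRLConfig as _CTRLConfig

_cfg = _CTRLConfig(n_embd=1_280, n_layer=48)
assert _cfg.hidden_size == _cfg.n_embd == 1_280
assert _cfg.num_hidden_layers == _cfg.n_layer == 48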
| 75 | 1 |
import functools


def mincost_tickets(days: list, costs: list) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")
    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1)
        # Cheapest of: a 1-day, 7-day, or 30-day pass bought on this day.
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
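# Hand-checked example for mincost_tickets above, with costs interpreted as
# [1-day, 7-day, 30-day] pass prices (the LeetCode 983 convention):
# mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) -> 11
# (a 7-day pass covering days 1-7 costs 7; 1-day passes on days 8 and 20 add 2 + 2).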
| 75 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'pytorch',
'script': 'run_ddp.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'tensorflow',
'script': 'run_tf_dist.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
},
] )
class lowerCamelCase_ ( unittest.TestCase ):
def lowercase_ ( self : List[str] ):
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=_A , )
assert hasattr(self , '''env''' )
def lowercase_ ( self : List[Any] , _A : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = f"""{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}"""
# distributed data settings
UpperCAmelCase__ : int = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=_A , instance_count=_A , instance_type=self.instance_type , debugger_hook_config=_A , hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=_A , py_version='''py36''' , )
def lowercase_ ( self : Optional[int] , _A : Any ):
'''simple docstring'''
TrainingJobAnalytics(_A ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(2,)] )
def lowercase_ ( self : Optional[int] , _A : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.create_estimator(_A )
# run training
estimator.fit()
# result dataframe
UpperCAmelCase__ : Union[str, Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
UpperCAmelCase__ : Dict = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
UpperCAmelCase__ : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
UpperCAmelCase__ : Any = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 999_999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , _A )
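# A minimal sketch of how the parameterized_class decorator above injects the
# per-row attributes (framework, script, results, ...) used by the test methods:
from parameterized import parameterized_class
import unittest


@parameterized_class([{"framework": "pytorch"}, {"framework": "tensorflow"}])
class _DemoTest(unittest.TestCase):
    def test_framework_attribute_is_set(self):
        self.assertIn(self.framework, {"pytorch", "tensorflow"})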
| 75 | 1 |
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class lowerCamelCase_ ( __a , unittest.TestCase ):
lowerCAmelCase__ = BarthezTokenizer
lowerCAmelCase__ = BarthezTokenizerFast
lowerCAmelCase__ = True
lowerCAmelCase__ = True
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
super().setUp()
UpperCAmelCase__ : List[Any] = BarthezTokenizerFast.from_pretrained('''moussaKam/mbarthez''' )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=_A )
UpperCAmelCase__ : Dict = tokenizer
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = '''<pad>'''
UpperCAmelCase__ : Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(_A ) , 101_122 )
def lowercase_ ( self : int ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 101_122 )
@require_torch
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
UpperCAmelCase__ : Union[str, Any] = [0, 57, 3_018, 70_307, 91, 2]
UpperCAmelCase__ : List[str] = self.tokenizer(
_A , max_length=len(_A ) , padding=_A , truncation=_A , return_tensors='''pt''' )
self.assertIsInstance(_A , _A )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
UpperCAmelCase__ : Dict = batch.input_ids.tolist()[0]
self.assertListEqual(_A , _A )
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
UpperCAmelCase__ : int = self.get_tokenizer()
UpperCAmelCase__ : Tuple = self.get_rust_tokenizer()
UpperCAmelCase__ : List[Any] = '''I was born in 92000, and this is falsé.'''
UpperCAmelCase__ : Dict = tokenizer.tokenize(_A )
UpperCAmelCase__ : Any = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
UpperCAmelCase__ : Optional[int] = tokenizer.encode(_A , add_special_tokens=_A )
UpperCAmelCase__ : Optional[Any] = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
UpperCAmelCase__ : Tuple = self.get_rust_tokenizer()
UpperCAmelCase__ : Tuple = tokenizer.encode(_A )
UpperCAmelCase__ : int = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
@slow
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : str = {'''input_ids''': [[0, 490, 14_328, 4_507, 354, 47, 43_669, 95, 25, 78_117, 20_215, 19_779, 190, 22, 400, 4, 35_343, 80_310, 603, 86, 24_937, 105, 33_438, 94_762, 196, 39_642, 7, 15, 15_933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10_534, 87, 25, 66, 3_358, 196, 55_289, 8, 82_961, 81, 2_204, 75_203, 7, 15, 763, 12_956, 216, 178, 14_328, 9_595, 1_377, 69_693, 7, 448, 71_021, 196, 18_106, 1_437, 13_974, 108, 9_083, 4, 49_315, 7, 39, 86, 1_326, 2_793, 46_333, 4, 448, 196, 74_588, 7, 49_315, 7, 39, 21, 822, 38_470, 74, 21, 66_723, 62_480, 8, 22_050, 5, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# moussaKam/mbarthez is a french model. So we also use french texts.
UpperCAmelCase__ : Optional[Any] = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='''moussaKam/mbarthez''' , revision='''c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6''' , sequences=_A , )
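# Quick usage sketch (assumes `transformers` is installed and the hub checkpoint
# is reachable); per the integration inputs above, ids start with <s>=0 and end
# with </s>=2:
# from transformers import AutoTokenizer
# tok = AutoTokenizer.from_pretrained("moussaKam/mbarthez")
# tok("Une phrase en français.").input_ids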
| 75 |
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
UpperCamelCase__ = '''\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
'''
UpperCamelCase__ = '''\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
'''
UpperCamelCase__ = R'''
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting "1/2" to "\\frac{1}{2}")
Examples:
>>> metric = datasets.load_metric("competition_math")
>>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
>>> print(results)
{\'accuracy\': 1.0}
'''
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase_ ( datasets.Metric ):
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ),
'''references''': datasets.Value('''string''' ),
} ) , homepage='''https://github.com/hendrycks/math''' , codebase_urls=['''https://github.com/hendrycks/math'''] , )
    def _compute(self, predictions, references):
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
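# Minimal usage sketch (requires `datasets` and the math_equivalence package
# noted in the import above), mirroring the docstring example:
# import datasets
# metric = datasets.load_metric("competition_math")
# metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])  # {'accuracy': 1.0}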
| 75 | 1 |
'''simple docstring'''
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input train ref data file for whole word mask in Chinese."}
    )
    eval_ref_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."}
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether or not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization. "
                "The training dataset will be truncated in blocks of this size for training. "
                "Defaults to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set whole word masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, ref_path=ref_path
                )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, overwrite_cache=args.overwrite_cache, cache_dir=cache_dir
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            "Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
            "or remove the --do_eval argument."
        )
    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from "
            "another script, save it, and load it from here, using --tokenizer_name"
        )

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            "BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the "
            "--mlm flag (masked language modeling)."
        )

    if data_args.block_size <= 0:
        # Our input block size will be the max possible for the model
        data_args.block_size = tokenizer.max_len
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)

    # Get datasets
    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer, plm_probability=data_args.plm_probability, max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)
        else:
            data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, data_collator=data_collator, train_dataset=train_dataset, eval_dataset=eval_dataset, prediction_loss_only=True,
    )

    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info("  %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))
        results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
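# Example invocation (illustrative paths and flags, not the only valid set;
# assumes the script is saved as run_language_modeling.py):
# python run_language_modeling.py \
#     --model_name_or_path distilbert-base-cased --mlm \
#     --train_data_file train.txt --eval_data_file eval.txt \
#     --do_train --do_eval --output_dir ./lm-output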
| 75 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all BART models at https://huggingface.co/models?filter=bart
UpperCamelCase__ = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json''',
},
}
UpperCamelCase__ = {
'''facebook/bart-base''': 1_0_2_4,
'''facebook/bart-large''': 1_0_2_4,
'''facebook/bart-large-mnli''': 1_0_2_4,
'''facebook/bart-large-cnn''': 1_0_2_4,
'''facebook/bart-large-xsum''': 1_0_2_4,
'''yjernite/bart_eli5''': 1_0_2_4,
}
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ['input_ids', 'attention_mask']
lowerCAmelCase__ = BartTokenizer
def __init__( self : Tuple , _A : List[str]=None , _A : Optional[Any]=None , _A : Union[str, Any]=None , _A : Tuple="replace" , _A : Optional[Any]="<s>" , _A : int="</s>" , _A : Optional[Any]="</s>" , _A : List[str]="<s>" , _A : Optional[int]="<unk>" , _A : Optional[int]="<pad>" , _A : str="<mask>" , _A : Dict=False , _A : int=True , **_A : Optional[Any] , ):
'''simple docstring'''
super().__init__(
_A , _A , tokenizer_file=_A , errors=_A , bos_token=_A , eos_token=_A , sep_token=_A , cls_token=_A , unk_token=_A , pad_token=_A , mask_token=_A , add_prefix_space=_A , trim_offsets=_A , **_A , )
UpperCAmelCase__ : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , _A ) != add_prefix_space:
UpperCAmelCase__ : str = getattr(_A , pre_tok_state.pop('''type''' ) )
UpperCAmelCase__ : Any = add_prefix_space
UpperCAmelCase__ : str = pre_tok_class(**_A )
UpperCAmelCase__ : Dict = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
UpperCAmelCase__ : Optional[Any] = '''post_processor'''
UpperCAmelCase__ : List[Any] = getattr(self.backend_tokenizer , _A , _A )
if tokenizer_component_instance:
UpperCAmelCase__ : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
UpperCAmelCase__ : Union[str, Any] = tuple(state['''sep'''] )
if "cls" in state:
UpperCAmelCase__ : Union[str, Any] = tuple(state['''cls'''] )
UpperCAmelCase__ : Dict = False
if state.get('''add_prefix_space''' , _A ) != add_prefix_space:
UpperCAmelCase__ : Union[str, Any] = add_prefix_space
UpperCAmelCase__ : Dict = True
if state.get('''trim_offsets''' , _A ) != trim_offsets:
UpperCAmelCase__ : List[Any] = trim_offsets
UpperCAmelCase__ : List[Any] = True
if changes_to_apply:
UpperCAmelCase__ : Dict = getattr(_A , state.pop('''type''' ) )
UpperCAmelCase__ : Union[str, Any] = component_class(**_A )
setattr(self.backend_tokenizer , _A , _A )
    @property
    def mask_token(self):
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
def lowercase_ ( self : Optional[int] , *_A : List[str] , **_A : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Any = kwargs.get('''is_split_into_words''' , _A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*_A , **_A )
def lowercase_ ( self : Optional[Any] , *_A : Union[str, Any] , **_A : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = kwargs.get('''is_split_into_words''' , _A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*_A , **_A )
def lowercase_ ( self : Optional[int] , _A : str , _A : Optional[str] = None ):
'''simple docstring'''
UpperCAmelCase__ : str = self._tokenizer.model.save(_A , name=_A )
return tuple(_A )
def lowercase_ ( self : Tuple , _A : Union[str, Any] , _A : Optional[int]=None ):
'''simple docstring'''
UpperCAmelCase__ : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowercase_ ( self : int , _A : List[int] , _A : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = [self.sep_token_id]
UpperCAmelCase__ : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
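# Quick usage sketch (assumes `transformers` is installed): the special-token
# layout produced by build_inputs_with_special_tokens above.
# from transformers import BartTokenizerFast
# tok = BartTokenizerFast.from_pretrained("facebook/bart-base")
# tok("Hello world").input_ids  # [tok.bos_token_id, ..., tok.eos_token_id]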
| 75 | 1 |
'''simple docstring'''
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple(x):
    # Pass iterables through unchanged; duplicate a scalar into a pair,
    # e.g. 224 -> (224, 224).
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class lowerCamelCase_ :
def lowercase_ ( self : str , _A : Union[str, Any] , _A : List[Any] ):
'''simple docstring'''
pass
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
pass
def lowercase_ ( self : Any ):
'''simple docstring'''
pass
def lowercase_ ( self : List[Any] , _A : np.ndarray , _A : np.ndarray , _A : float ):
'''simple docstring'''
UpperCAmelCase__ : int = np.abs((a - b) ).max()
self.assertLessEqual(_A , _A , f"""Difference between torch and flax is {diff} (>= {tol}).""" )
def lowercase_ ( self : Tuple , _A : Union[str, Any] , _A : Union[str, Any] , _A : Union[str, Any] , _A : Any , _A : Any=None , **_A : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = VisionTextDualEncoderConfig.from_vision_text_configs(_A , _A )
UpperCAmelCase__ : Optional[int] = FlaxVisionTextDualEncoderModel(_A )
UpperCAmelCase__ : Union[str, Any] = model(input_ids=_A , pixel_values=_A , attention_mask=_A )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim) )
def lowercase_ ( self : Optional[int] , _A : List[Any] , _A : Tuple , _A : str , _A : str , _A : int=None , **_A : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.get_vision_text_model(_A , _A )
UpperCAmelCase__ : int = {'''vision_model''': vision_model, '''text_model''': text_model}
UpperCAmelCase__ : List[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_A )
UpperCAmelCase__ : Any = model(input_ids=_A , pixel_values=_A , attention_mask=_A )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) )
def lowercase_ ( self : Dict , _A : Optional[int] , _A : Union[str, Any] , _A : List[str] , _A : str , _A : Optional[int]=None , **_A : Any ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.get_vision_text_model(_A , _A )
UpperCAmelCase__ : Optional[Any] = {'''vision_model''': vision_model, '''text_model''': text_model}
UpperCAmelCase__ : Tuple = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_A )
UpperCAmelCase__ : Any = model(input_ids=_A , pixel_values=_A , attention_mask=_A )
UpperCAmelCase__ : Dict = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_A )
UpperCAmelCase__ : Optional[Any] = FlaxVisionTextDualEncoderModel.from_pretrained(_A )
UpperCAmelCase__ : Optional[Any] = model(input_ids=_A , pixel_values=_A , attention_mask=_A )
UpperCAmelCase__ : Any = after_output[0]
UpperCAmelCase__ : str = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_A , 1e-3 )
def lowercase_ ( self : Tuple , _A : Dict , _A : Optional[Any] , _A : List[Any] , _A : List[Any] , _A : Dict=None , **_A : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : str = self.get_vision_text_model(_A , _A )
UpperCAmelCase__ : List[str] = {'''vision_model''': vision_model, '''text_model''': text_model}
UpperCAmelCase__ : Dict = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_A )
UpperCAmelCase__ : Optional[Any] = model(
input_ids=_A , pixel_values=_A , attention_mask=_A , output_attentions=_A )
UpperCAmelCase__ : List[Any] = output.vision_model_output.attentions
self.assertEqual(len(_A ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase__ : Dict = to_atuple(vision_model.config.image_size )
UpperCAmelCase__ : List[Any] = to_atuple(vision_model.config.patch_size )
UpperCAmelCase__ : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
UpperCAmelCase__ : Tuple = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
UpperCAmelCase__ : List[Any] = output.text_model_output.attentions
self.assertEqual(len(_A ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def lowercase_ ( self : List[str] , _A : Dict , _A : Any , _A : Tuple ):
'''simple docstring'''
pt_model.to(_A )
pt_model.eval()
# prepare inputs
UpperCAmelCase__ : Any = inputs_dict
UpperCAmelCase__ : Optional[Any] = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
UpperCAmelCase__ : int = pt_model(**_A ).to_tuple()
UpperCAmelCase__ : List[Any] = fx_model(**_A ).to_tuple()
self.assertEqual(len(_A ) , len(_A ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(_A , pt_output.numpy() , 4e-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(_A )
UpperCAmelCase__ : int = FlaxVisionTextDualEncoderModel.from_pretrained(_A , from_pt=_A )
UpperCAmelCase__ : Tuple = fx_model_loaded(**_A ).to_tuple()
self.assertEqual(len(_A ) , len(_A ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(_A , pt_output.numpy() , 4e-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(_A )
UpperCAmelCase__ : Optional[int] = VisionTextDualEncoderModel.from_pretrained(_A , from_flax=_A )
pt_model_loaded.to(_A )
pt_model_loaded.eval()
with torch.no_grad():
UpperCAmelCase__ : Any = pt_model_loaded(**_A ).to_tuple()
self.assertEqual(len(_A ) , len(_A ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(_A , pt_output_loaded.numpy() , 4e-2 )
def lowercase_ ( self : Union[str, Any] , _A : Optional[Any] , _A : List[str] , _A : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = VisionTextDualEncoderConfig.from_vision_text_configs(_A , _A )
UpperCAmelCase__ : Optional[int] = VisionTextDualEncoderModel(_A )
UpperCAmelCase__ : Tuple = FlaxVisionTextDualEncoderModel(_A )
UpperCAmelCase__ : Union[str, Any] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _A )
UpperCAmelCase__ : Union[str, Any] = fx_state
self.check_pt_flax_equivalence(_A , _A , _A )
def lowercase_ ( self : List[Any] , _A : Optional[Any] , _A : int , _A : Dict ):
'''simple docstring'''
UpperCAmelCase__ : str = VisionTextDualEncoderConfig.from_vision_text_configs(_A , _A )
UpperCAmelCase__ : Optional[Any] = VisionTextDualEncoderModel(_A )
UpperCAmelCase__ : int = FlaxVisionTextDualEncoderModel(_A )
UpperCAmelCase__ : Optional[Any] = load_flax_weights_in_pytorch_model(_A , fx_model.params )
self.check_pt_flax_equivalence(_A , _A , _A )
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_A )
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Any = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_A )
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.prepare_config_and_inputs()
self.check_save_load(**_A )
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : str = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_A )
@is_pt_flax_cross_test
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = self.prepare_config_and_inputs()
UpperCAmelCase__ : Optional[int] = config_inputs_dict.pop('''vision_config''' )
UpperCAmelCase__ : List[str] = config_inputs_dict.pop('''text_config''' )
UpperCAmelCase__ : List[str] = config_inputs_dict
self.check_equivalence_pt_to_flax(_A , _A , _A )
self.check_equivalence_flax_to_pt(_A , _A , _A )
@slow
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.get_pretrained_model_and_inputs()
UpperCAmelCase__ : Union[str, Any] = model_a(**_A )
UpperCAmelCase__ : Tuple = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_A )
UpperCAmelCase__ : Tuple = FlaxVisionTextDualEncoderModel.from_pretrained(_A )
UpperCAmelCase__ : str = model_a(**_A )
UpperCAmelCase__ : Optional[Any] = after_outputs[0]
UpperCAmelCase__ : Dict = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_A , 1e-5 )
@require_flax
class lowerCamelCase_ ( __a , unittest.TestCase ):
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Dict = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-vit''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=_A , text_from_pt=_A , )
UpperCAmelCase__ : List[str] = 13
UpperCAmelCase__ : int = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
UpperCAmelCase__ : Any = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
UpperCAmelCase__ : List[str] = random_attention_mask([batch_size, 4] )
UpperCAmelCase__ : Tuple = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def lowercase_ ( self : int , _A : Union[str, Any] , _A : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = FlaxViTModel(_A )
UpperCAmelCase__ : List[Any] = FlaxBertModel(_A )
return vision_model, text_model
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = FlaxViTModelTester(self )
UpperCAmelCase__ : List[str] = FlaxBertModelTester(self )
UpperCAmelCase__ : str = vit_model_tester.prepare_config_and_inputs()
UpperCAmelCase__ : Dict = bert_model_tester.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = vision_config_and_inputs
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Dict = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class lowerCamelCase_ ( __a , unittest.TestCase ):
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-clip''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=_A , text_from_pt=_A , )
UpperCAmelCase__ : List[Any] = 13
UpperCAmelCase__ : Dict = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
UpperCAmelCase__ : Optional[int] = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
UpperCAmelCase__ : Tuple = random_attention_mask([batch_size, 4] )
UpperCAmelCase__ : Any = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def lowercase_ ( self : int , _A : Any , _A : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = FlaxCLIPVisionModel(_A )
UpperCAmelCase__ : int = FlaxBertModel(_A )
return vision_model, text_model
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = FlaxCLIPVisionModelTester(self )
UpperCAmelCase__ : List[str] = FlaxBertModelTester(self )
UpperCAmelCase__ : Tuple = clip_model_tester.prepare_config_and_inputs()
UpperCAmelCase__ : Optional[Any] = bert_model_tester.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = vision_config_and_inputs
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
@slow
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : str = FlaxVisionTextDualEncoderModel.from_pretrained('''clip-italian/clip-italian''' , logit_scale_init_value=1.0 )
UpperCAmelCase__ : int = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' )
UpperCAmelCase__ : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
UpperCAmelCase__ : Optional[int] = processor(
text=['''una foto di un gatto''', '''una foto di un cane'''] , images=_A , padding=_A , return_tensors='''np''' )
UpperCAmelCase__ : List[str] = model(**_A )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
UpperCAmelCase__ : Tuple = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]] )
self.assertTrue(np.allclose(outputs.logits_per_image , _A , atol=1e-3 ) )
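# Standalone sketch of the patch-count arithmetic used in
# check_vision_text_output_attention above:
def _num_vit_tokens(image_size: int, patch_size: int) -> int:
    num_patches = (image_size // patch_size) ** 2
    return num_patches + 1  # +1 for the [CLS] token


assert _num_vit_tokens(224, 16) == 197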
| 75 |
import random
from typing import Any


def fisher_yates_shuffle(data: list) -> list[Any]:
    # Classic Fisher-Yates: walk from the last index down, swapping each
    # element with a uniformly chosen index at or before it.
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)
        data[i], data[j] = data[j], data[i]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
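# Sanity-check sketch: drawing j from [0, i] at each step is what makes the
# shuffle unbiased; swapping two independent random indices per step (a common
# mis-implementation) does not yield a uniform permutation.
# from collections import Counter
# Counter(tuple(fisher_yates_shuffle([0, 1, 2])) for _ in range(60_000))
# -> each of the 6 orderings appears roughly 10_000 times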
| 75 | 1 |
'''simple docstring'''
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase__ = get_tests_dir('''fixtures/test_sentencepiece.model''')
UpperCamelCase__ = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''')
UpperCamelCase__ = '''pt''' if is_torch_available() else '''tf'''
@require_sentencepiece
@require_tokenizers
class lowerCamelCase_ ( __a , unittest.TestCase ):
lowerCAmelCase__ = CamembertTokenizer
lowerCAmelCase__ = CamembertTokenizerFast
lowerCAmelCase__ = True
lowerCAmelCase__ = True
def lowercase_ ( self : str ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase__ : Optional[int] = CamembertTokenizer(_A )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = '''<pad>'''
UpperCAmelCase__ : Tuple = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>NOTUSED''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(_A ) , 1_004 )
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_005 )
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = CamembertTokenizer(_A )
tokenizer.save_pretrained(self.tmpdirname )
UpperCAmelCase__ : int = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
UpperCAmelCase__ : Any = '''I was born in 92000, and this is falsé.'''
UpperCAmelCase__ : Any = tokenizer.encode(_A )
UpperCAmelCase__ : Tuple = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
UpperCAmelCase__ : Dict = tokenizer.encode(_A , add_special_tokens=_A )
UpperCAmelCase__ : Optional[Any] = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
UpperCAmelCase__ : Tuple = tokenizer.convert_ids_to_tokens(_A )
UpperCAmelCase__ : int = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
UpperCAmelCase__ : str = self.get_tokenizer()
UpperCAmelCase__ : Tuple = self.get_rust_tokenizer()
UpperCAmelCase__ : List[Any] = '''I was born in 92000, and this is falsé.'''
UpperCAmelCase__ : List[str] = tokenizer.tokenize(_A )
UpperCAmelCase__ : Any = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
UpperCAmelCase__ : Any = tokenizer.encode(_A , add_special_tokens=_A )
UpperCAmelCase__ : Optional[int] = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
UpperCAmelCase__ : str = self.get_rust_tokenizer()
UpperCAmelCase__ : Optional[int] = tokenizer.encode(_A )
UpperCAmelCase__ : Optional[Any] = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
@slow
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : int = {'''input_ids''': [[5, 54, 7_196, 297, 30, 23, 776, 18, 11, 3_215, 3_705, 8_252, 22, 3_164, 1_181, 2_116, 29, 16, 813, 25, 791, 3_314, 20, 3_446, 38, 27_575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9_088, 20, 1_517, 8, 22_804, 18_818, 10, 38, 629, 607, 607, 142, 19, 7_196, 867, 56, 10_326, 24, 2_267, 20, 416, 5_072, 15_612, 233, 734, 7, 2_399, 27, 16, 3_015, 1_649, 7, 24, 20, 4_338, 2_399, 27, 13, 3_400, 14, 13, 6_189, 8, 930, 9, 6]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# camembert is a french model. So we also use french texts.
UpperCAmelCase__ : Tuple = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='''camembert-base''' , revision='''3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf''' , sequences=_A , )
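# Note on the <unk> divergence flagged in the comments above: SentencePiece's
# EncodeAsPieces can return the raw surface form for an out-of-vocabulary
# piece, while the rust tokenizer emits the literal "<unk>" string, so token
# strings may differ between the two even when the resulting ids agree.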
| 75 |
import math


def sieve(n: int) -> list:
    """Segmented sieve of Eratosthenes: return all primes up to n."""
    prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []

    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each
            # Mark composite multiples of `each` inside the window [low, high].
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime


if __name__ == "__main__":
    print(sieve(10**6))
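# Cross-check sketch for sieve() above (hand-verified values):
if __name__ == "__main__":
    assert sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
    assert len(sieve(100)) == 25  # there are 25 primes below 100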
| 75 | 1 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
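# Usage sketch (assumes the `datasets` task-template API shown above):
# from datasets.tasks import LanguageModeling
# task = LanguageModeling(text_column="content")
# task.column_mapping  # {'content': 'text'}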
| 75 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase_ ( __a , __a , __a , unittest.TestCase ):
lowerCAmelCase__ = StableDiffusionInpaintPipeline
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
lowerCAmelCase__ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowerCAmelCase__ = frozenset([] )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase__ : List[str] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_A , )
UpperCAmelCase__ : int = PNDMScheduler(skip_prk_steps=_A )
torch.manual_seed(0 )
UpperCAmelCase__ : str = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
UpperCAmelCase__ : Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='''gelu''' , projection_dim=512 , )
UpperCAmelCase__ : Union[str, Any] = CLIPTextModel(_A )
UpperCAmelCase__ : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
UpperCAmelCase__ : str = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def lowercase_ ( self : str , _A : Dict , _A : Any=0 ):
'''simple docstring'''
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert('''RGB''' ).resize((64, 64) )
        mask_image = Image.fromarray(np.uint8(image + 4 ) ).convert('''RGB''' ).resize((64, 64) )
if str(_A ).startswith('''mps''' ):
UpperCAmelCase__ : List[Any] = torch.manual_seed(_A )
else:
UpperCAmelCase__ : str = torch.Generator(device=_A ).manual_seed(_A )
UpperCAmelCase__ : Optional[int] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': init_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase__ : Tuple = self.get_dummy_components()
UpperCAmelCase__ : str = StableDiffusionInpaintPipeline(**_A )
UpperCAmelCase__ : List[str] = sd_pipe.to(_A )
sd_pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase__ : Dict = self.get_dummy_inputs(_A )
UpperCAmelCase__ : Any = sd_pipe(**_A ).images
UpperCAmelCase__ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase__ : int = np.array([0.4_7_2_7, 0.5_7_3_5, 0.3_9_4_1, 0.5_4_4_6, 0.5_9_2_6, 0.4_3_9_4, 0.5_0_6_2, 0.4_6_5_4, 0.4_4_7_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase_ ( self : Tuple ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
UpperCAmelCase__ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
UpperCAmelCase__ : List[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench.npy''' )
UpperCAmelCase__ : Dict = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCAmelCase__ : Optional[int] = StableDiffusionInpaintPipeline.from_pretrained(_A , safety_checker=_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
UpperCAmelCase__ : Tuple = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCAmelCase__ : str = torch.manual_seed(0 )
UpperCAmelCase__ : str = pipe(
prompt=_A , image=_A , mask_image=_A , generator=_A , output_type='''np''' , )
UpperCAmelCase__ : int = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9e-3
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
UpperCAmelCase__ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
UpperCAmelCase__ : Union[str, Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench_fp16.npy''' )
UpperCAmelCase__ : Tuple = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCAmelCase__ : Any = StableDiffusionInpaintPipeline.from_pretrained(
_A , torch_dtype=torch.floataa , safety_checker=_A , )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
UpperCAmelCase__ : Tuple = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCAmelCase__ : Union[str, Any] = torch.manual_seed(0 )
UpperCAmelCase__ : Optional[Any] = pipe(
prompt=_A , image=_A , mask_image=_A , generator=_A , output_type='''np''' , )
UpperCAmelCase__ : Tuple = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def lowercase_ ( self : Any ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCAmelCase__ : Union[str, Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
UpperCAmelCase__ : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
UpperCAmelCase__ : Optional[Any] = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCAmelCase__ : str = PNDMScheduler.from_pretrained(_A , subfolder='''scheduler''' )
UpperCAmelCase__ : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(
_A , safety_checker=_A , scheduler=_A , torch_dtype=torch.floataa , )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
UpperCAmelCase__ : Optional[int] = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCAmelCase__ : Tuple = torch.manual_seed(0 )
UpperCAmelCase__ : Any = pipe(
prompt=_A , image=_A , mask_image=_A , generator=_A , num_inference_steps=2 , output_type='''np''' , )
UpperCAmelCase__ : int = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.6_5 * 10**9
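# Note (added): enable_sequential_cpu_offload() keeps only the submodule that is
# currently executing on the GPU and leaves the rest on CPU; together with
# attention slicing and half-precision weights, this is what keeps the peak
# allocation under the 2.65 GB asserted above.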
| 75 | 1 |
'''simple docstring'''
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> np.ndarray:
UpperCAmelCase__ : List[str] = cva.getAffineTransform(lowerCAmelCase__ , lowerCAmelCase__ )
return cva.warpAffine(lowerCAmelCase__ , lowerCAmelCase__ , (rows, cols) )
if __name__ == "__main__":
# read original image
UpperCamelCase__ = cva.imread(
str(Path(__file__).resolve().parent.parent / '''image_data''' / '''lena.jpg''')
)
# turn image in gray scale value
UpperCamelCase__ = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
# get image shape
UpperCamelCase__ , UpperCamelCase__ = gray_img.shape
# set different points to rotate image
UpperCamelCase__ = np.array([[5_0, 5_0], [2_0_0, 5_0], [5_0, 2_0_0]], np.floataa)
UpperCamelCase__ = np.array([[1_0, 1_0_0], [2_0_0, 5_0], [1_0_0, 2_5_0]], np.floataa)
UpperCamelCase__ = np.array([[5_0, 5_0], [1_5_0, 5_0], [1_2_0, 2_0_0]], np.floataa)
UpperCamelCase__ = np.array([[1_0, 1_0_0], [8_0, 5_0], [1_8_0, 2_5_0]], np.floataa)
# add all rotated images in a list
UpperCamelCase__ = [
gray_img,
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
]
# plot different image rotations
UpperCamelCase__ = plt.figure(1)
UpperCamelCase__ = ['''Original''', '''Rotation 1''', '''Rotation 2''', '''Rotation 3''']
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, '''gray''')
plt.title(titles[i])
plt.axis('''off''')
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
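# A minimal numpy sketch (added; not part of the original script) of what
# cva.getAffineTransform solves above: the 2x3 matrix M mapping three source
# points to three destination points, recovered here by least squares.
import numpy as np

def solve_affine(src: np.ndarray, dst: np.ndarray) -> np.ndarray:
    # Stack [x, y, 1] rows and solve A @ M.T = dst for the 2x3 matrix M.
    ones = np.ones((src.shape[0], 1), dtype=np.float64)
    A = np.hstack([src, ones])  # shape (3, 3)
    M_T, *_ = np.linalg.lstsq(A, dst, rcond=None)
    return M_T.T  # same (2, 3) layout that warpAffine expects

src = np.array([[50, 50], [200, 50], [50, 200]], np.float64)
dst = np.array([[10, 100], [200, 50], [100, 250]], np.float64)
print(solve_affine(src, dst))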
| 75 |
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCamelCase__ = '''platform'''
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , ) -> Tuple:
if attention_mask is None:
UpperCAmelCase__ : List[Any] = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
UpperCAmelCase__ : Union[str, Any] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
UpperCAmelCase__ : Optional[Any] = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCAmelCase__ : Optional[int] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
UpperCAmelCase__ : Any = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
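# Worked example (added): with config.pad_token_id == 1, an input_ids row with
# no padding such as [5, 97, 17, 39, 94, 40, 2] yields the all-ones mask
# [1, 1, 1, 1, 1, 1, 1] from np.where above, while a padded row such as
# [55, 13, 16, 58, 5, 2, 1] yields [1, 1, 1, 1, 1, 1, 0].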
class lowerCamelCase_ :
def __init__( self : Optional[Any] , _A : Optional[Any] , _A : str=13 , _A : int=7 , _A : Any=True , _A : List[Any]=False , _A : Optional[int]=99 , _A : Optional[int]=16 , _A : int=2 , _A : Optional[int]=4 , _A : Optional[int]=4 , _A : int="gelu" , _A : List[str]=0.1 , _A : str=0.1 , _A : int=32 , _A : Optional[int]=2 , _A : int=1 , _A : Dict=0 , _A : Dict=0.0_2 , ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = parent
UpperCAmelCase__ : str = batch_size
UpperCAmelCase__ : Dict = seq_length
UpperCAmelCase__ : str = is_training
UpperCAmelCase__ : int = use_labels
UpperCAmelCase__ : Union[str, Any] = vocab_size
UpperCAmelCase__ : Union[str, Any] = hidden_size
UpperCAmelCase__ : int = num_hidden_layers
UpperCAmelCase__ : Any = num_attention_heads
UpperCAmelCase__ : List[str] = intermediate_size
UpperCAmelCase__ : str = hidden_act
UpperCAmelCase__ : str = hidden_dropout_prob
UpperCAmelCase__ : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase__ : Union[str, Any] = max_position_embeddings
UpperCAmelCase__ : int = eos_token_id
UpperCAmelCase__ : Optional[int] = pad_token_id
UpperCAmelCase__ : List[str] = bos_token_id
UpperCAmelCase__ : Union[str, Any] = initializer_range
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Any = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
UpperCAmelCase__ : str = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
UpperCAmelCase__ : List[Any] = shift_tokens_right(_A , 1 , 2 )
UpperCAmelCase__ : List[Any] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_A , )
UpperCAmelCase__ : Tuple = prepare_blenderbot_inputs_dict(_A , _A , _A )
return config, inputs_dict
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.prepare_config_and_inputs()
return config, inputs_dict
def lowercase_ ( self : int , _A : List[Any] , _A : Optional[Any] , _A : int ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = 20
UpperCAmelCase__ : int = model_class_name(_A )
UpperCAmelCase__ : str = model.encode(inputs_dict['''input_ids'''] )
UpperCAmelCase__ , UpperCAmelCase__ : Dict = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
UpperCAmelCase__ : Tuple = model.init_cache(decoder_input_ids.shape[0] , _A , _A )
UpperCAmelCase__ : Optional[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
UpperCAmelCase__ : str = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
UpperCAmelCase__ : str = model.decode(
decoder_input_ids[:, :-1] , _A , decoder_attention_mask=_A , past_key_values=_A , decoder_position_ids=_A , )
UpperCAmelCase__ : Dict = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
UpperCAmelCase__ : Tuple = model.decode(
decoder_input_ids[:, -1:] , _A , decoder_attention_mask=_A , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_A , )
UpperCAmelCase__ : int = model.decode(_A , _A )
UpperCAmelCase__ : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def lowercase_ ( self : Tuple , _A : List[Any] , _A : Tuple , _A : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = 20
UpperCAmelCase__ : Optional[int] = model_class_name(_A )
UpperCAmelCase__ : Optional[int] = model.encode(inputs_dict['''input_ids'''] )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
UpperCAmelCase__ : Any = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
UpperCAmelCase__ : List[Any] = model.init_cache(decoder_input_ids.shape[0] , _A , _A )
UpperCAmelCase__ : Optional[Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
UpperCAmelCase__ : int = model.decode(
decoder_input_ids[:, :-1] , _A , decoder_attention_mask=_A , past_key_values=_A , decoder_position_ids=_A , )
UpperCAmelCase__ : List[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
UpperCAmelCase__ : Any = model.decode(
decoder_input_ids[:, -1:] , _A , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_A , decoder_position_ids=_A , )
UpperCAmelCase__ : List[str] = model.decode(_A , _A , decoder_attention_mask=_A )
UpperCAmelCase__ : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class lowerCamelCase_ ( unittest.TestCase ):
lowerCAmelCase__ = 9_9
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
UpperCAmelCase__ : int = input_ids.shape[0]
UpperCAmelCase__ : List[str] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self._get_config_and_data()
UpperCAmelCase__ : Any = FlaxBlenderbotForConditionalGeneration(_A )
UpperCAmelCase__ : Optional[int] = lm_model(input_ids=_A )
UpperCAmelCase__ : Dict = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , _A )
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
UpperCAmelCase__ : Union[str, Any] = FlaxBlenderbotForConditionalGeneration(_A )
UpperCAmelCase__ : str = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
UpperCAmelCase__ : Any = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
UpperCAmelCase__ : Tuple = lm_model(input_ids=_A , decoder_input_ids=_A )
UpperCAmelCase__ : int = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , _A )
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Any = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
UpperCAmelCase__ : Union[str, Any] = shift_tokens_right(_A , 1 , 2 )
UpperCAmelCase__ : str = np.equal(_A , 1 ).astype(np.floataa ).sum()
UpperCAmelCase__ : Dict = np.equal(_A , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(_A , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
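# Worked example (added, inferred from the assertions above): with
# pad_token_id=1 and decoder_start_token_id=2, shift_tokens_right maps the row
# [71, 82, 18, 33, 2, 1, 1] to [2, 71, 82, 18, 33, 2, 1]: every row now starts
# with the decoder start token and one trailing pad token falls off the end,
# which is exactly what the n_pad_before - 1 check verifies.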
@require_flax
class lowerCamelCase_ ( __a , unittest.TestCase , __a ):
lowerCAmelCase__ = True
lowerCAmelCase__ = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
lowerCAmelCase__ = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Dict = FlaxBlenderbotModelTester(self )
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_A , _A , _A )
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_A , _A , _A )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase__ : Dict = self._prepare_for_class(_A , _A )
UpperCAmelCase__ : str = model_class(_A )
@jax.jit
def encode_jitted(_A : Any , _A : Tuple=None , **_A : Optional[int] ):
return model.encode(input_ids=_A , attention_mask=_A )
with self.subTest('''JIT Enabled''' ):
UpperCAmelCase__ : Optional[Any] = encode_jitted(**_A ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
UpperCAmelCase__ : Tuple = encode_jitted(**_A ).to_tuple()
self.assertEqual(len(_A ) , len(_A ) )
for jitted_output, output in zip(_A , _A ):
self.assertEqual(jitted_output.shape , output.shape )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase__ : List[str] = model_class(_A )
UpperCAmelCase__ : Tuple = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
UpperCAmelCase__ : Tuple = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(_A : Optional[int] , _A : List[Any] , _A : int ):
return model.decode(
decoder_input_ids=_A , decoder_attention_mask=_A , encoder_outputs=_A , )
with self.subTest('''JIT Enabled''' ):
UpperCAmelCase__ : Any = decode_jitted(**_A ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
UpperCAmelCase__ : Optional[int] = decode_jitted(**_A ).to_tuple()
self.assertEqual(len(_A ) , len(_A ) )
for jitted_output, output in zip(_A , _A ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowercase_ ( self : List[str] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
UpperCAmelCase__ : Union[str, Any] = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' )
# the Blenderbot models expect an eos token in input_ids
UpperCAmelCase__ : Tuple = np.ones((1, 1) ) * model.config.eos_token_id
UpperCAmelCase__ : Union[str, Any] = model(_A )
self.assertIsNotNone(_A )
@unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' )
@slow
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 15, '''max_length''': 25}
UpperCAmelCase__ : int = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True}
UpperCAmelCase__ : str = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=_A )
UpperCAmelCase__ : Optional[Any] = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' )
UpperCAmelCase__ : Optional[Any] = ['''Sam''']
UpperCAmelCase__ : Dict = tokenizer(_A , return_tensors='''jax''' )
UpperCAmelCase__ : List[str] = model.generate(**_A , **_A )
UpperCAmelCase__ : Dict = '''Sam is a great name. It means "sun" in Gaelic.'''
UpperCAmelCase__ : Any = tokenizer.batch_decode(_A , **_A )
assert generated_txt[0].strip() == tgt_text
| 75 | 1 |
'''simple docstring'''
UpperCamelCase__ = '''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'''
def a__ ( lowerCAmelCase__ ) -> bytes:
# Make sure the supplied data is a bytes-like object
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase__ : Union[str, Any] = F"""a bytes-like object is required, not '{data.__class__.__name__}'"""
raise TypeError(lowerCAmelCase__ )
UpperCAmelCase__ : Dict = ''''''.join(bin(lowerCAmelCase__ )[2:].zfill(8 ) for byte in data )
UpperCAmelCase__ : List[Any] = len(lowerCAmelCase__ ) % 6 != 0
if padding_needed:
# The padding that will be added later
UpperCAmelCase__ : List[Any] = b'''=''' * ((6 - len(lowerCAmelCase__ ) % 6) // 2)
# Append arbitrary binary digits (0's by default) to binary_stream to make
# its length a multiple of 6.
binary_stream += "0" * (6 - len(lowerCAmelCase__ ) % 6)
else:
UpperCAmelCase__ : Union[str, Any] = b''''''
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(lowerCAmelCase__ ) , 6 ) ).encode()
+ padding
)
def a__ ( lowerCAmelCase__ ) -> bytes:
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase__ : Dict = (
'''argument should be a bytes-like object or ASCII string, '''
F"""not '{encoded_data.__class__.__name__}'"""
)
raise TypeError(lowerCAmelCase__ )
# In case encoded_data is a bytes-like object, convert it to a string object,
# making sure it contains only ASCII characters
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
try:
UpperCAmelCase__ : List[Any] = encoded_data.decode('''utf-8''' )
except UnicodeDecodeError:
raise ValueError('''base64 encoded data should only contain ASCII characters''' )
UpperCAmelCase__ : int = encoded_data.count('''=''' )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(lowerCAmelCase__ ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
UpperCAmelCase__ : Optional[int] = encoded_data[:-padding]
UpperCAmelCase__ : Any = ''''''.join(
bin(B64_CHARSET.index(lowerCAmelCase__ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
UpperCAmelCase__ : str = ''''''.join(
bin(B64_CHARSET.index(lowerCAmelCase__ ) )[2:].zfill(6 ) for char in encoded_data )
UpperCAmelCase__ : Union[str, Any] = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(lowerCAmelCase__ ) , 8 )
]
return bytes(lowerCAmelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
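# Round-trip sketch (added). Both functions above are mangled to `a__` in this
# dump, so we assume their original names, base64_encode and base64_decode:
import base64 as stdlib_base64

sample = b"Hello, base64!"
encoded = base64_encode(sample)  # hypothetical name for the encoder above
assert encoded == stdlib_base64.b64encode(sample)
assert base64_decode(encoded) == sample  # hypothetical name for the decoder above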
| 75 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class lowerCamelCase_ ( datasets.BeamBasedBuilder ):
def lowercase_ ( self : str ):
'''simple docstring'''
return datasets.DatasetInfo(
features=datasets.Features({'''content''': datasets.Value('''string''' )} ) , supervised_keys=_A , )
def lowercase_ ( self : int , _A : Optional[int] , _A : Optional[Any] ):
'''simple docstring'''
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_dummy_examples()} )]
def lowercase_ ( self : Union[str, Any] , _A : str , _A : Union[str, Any] ):
'''simple docstring'''
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(_A )
class lowerCamelCase_ ( datasets.BeamBasedBuilder ):
def lowercase_ ( self : Any ):
'''simple docstring'''
return datasets.DatasetInfo(
features=datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) , supervised_keys=_A , )
def lowercase_ ( self : Any , _A : List[str] , _A : Any ):
'''simple docstring'''
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_nested_examples()} )
]
def lowercase_ ( self : List[str] , _A : Optional[int] , _A : Tuple ):
'''simple docstring'''
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(_A )
def a__ ( ) -> Tuple:
return [(i, {"content": content}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )]
def a__ ( ) -> Optional[Any]:
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )]
class lowerCamelCase_ ( __a ):
@require_beam
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCAmelCase__ : Any = DummyBeamDataset(cache_dir=_A , beam_runner='''DirectRunner''' )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(_A , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train.arrow""" ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) )
UpperCAmelCase__ : Union[str, Any] = builder.as_dataset()
self.assertEqual(dset['''train'''].num_rows , _A )
self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , _A )
self.assertDictEqual(dset['''train'''][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset['''train'''][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(_A , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
del dset
@require_beam
def lowercase_ ( self : Any ):
'''simple docstring'''
import apache_beam as beam
UpperCAmelCase__ : List[str] = beam.io.parquetio.WriteToParquet
UpperCAmelCase__ : int = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCAmelCase__ : Optional[int] = DummyBeamDataset(cache_dir=_A , beam_runner='''DirectRunner''' )
with patch('''apache_beam.io.parquetio.WriteToParquet''' ) as write_parquet_mock:
UpperCAmelCase__ : Dict = partial(_A , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
_A , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
_A , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train-00001-of-00002.arrow""" ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) )
UpperCAmelCase__ : Tuple = builder.as_dataset()
self.assertEqual(dset['''train'''].num_rows , _A )
self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , _A )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset['''train''']['''content'''] ) , sorted(['''foo''', '''bar''', '''foobar'''] ) )
self.assertTrue(
os.path.exists(os.path.join(_A , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
del dset
@require_beam
def lowercase_ ( self : int ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCAmelCase__ : Tuple = DummyBeamDataset(cache_dir=_A )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCAmelCase__ : int = NestedBeamDataset(cache_dir=_A , beam_runner='''DirectRunner''' )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(_A , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train.arrow""" ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) )
UpperCAmelCase__ : Optional[int] = builder.as_dataset()
self.assertEqual(dset['''train'''].num_rows , _A )
self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , _A )
self.assertDictEqual(dset['''train'''][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset['''train'''][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(_A , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
del dset
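# Minimal sketch (added; assumes apache_beam is installed) of the DirectRunner
# pattern the dummy builders above rely on:
import apache_beam as beam

with beam.Pipeline(runner="DirectRunner") as pipeline:
    _ = pipeline | "Load Examples" >> beam.Create([(0, {"content": "foo"})])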
| 75 | 1 |
'''simple docstring'''
from math import pi
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> float:
return 2 * pi * radius * (angle / 3_60)
if __name__ == "__main__":
print(arc_length(9_0, 1_0))
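# Worked check (added): arc_length(90, 10) = 2 * pi * 10 * (90 / 360)
# = 5 * pi ≈ 15.70796, i.e. a quarter of the full circumference 2 * pi * r.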
| 75 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
UpperCamelCase__ = '''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'''
def a__ ( ) -> List[str]:
UpperCAmelCase__ : Optional[int] = _ask_options(
'''In which compute environment are you running?''' , ['''This machine''', '''AWS (Amazon SageMaker)'''] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
UpperCAmelCase__ : Any = get_sagemaker_input()
else:
UpperCAmelCase__ : List[str] = get_cluster_input()
return config
def a__ ( lowerCAmelCase__=None ) -> List[Any]:
if subparsers is not None:
UpperCAmelCase__ : Union[str, Any] = subparsers.add_parser('''config''' , description=lowerCAmelCase__ )
else:
UpperCAmelCase__ : Dict = argparse.ArgumentParser('''Accelerate config command''' , description=lowerCAmelCase__ )
parser.add_argument(
'''--config_file''' , default=lowerCAmelCase__ , help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) , )
if subparsers is not None:
parser.set_defaults(func=lowerCAmelCase__ )
return parser
def a__ ( lowerCAmelCase__ ) -> List[Any]:
UpperCAmelCase__ : List[Any] = get_user_input()
if args.config_file is not None:
UpperCAmelCase__ : Any = args.config_file
else:
if not os.path.isdir(lowerCAmelCase__ ):
os.makedirs(lowerCAmelCase__ )
UpperCAmelCase__ : int = default_yaml_config_file
if config_file.endswith('''.json''' ):
config.to_json_file(lowerCAmelCase__ )
else:
config.to_yaml_file(lowerCAmelCase__ )
print(F"""accelerate configuration saved at {config_file}""" )
def a__ ( ) -> str:
UpperCAmelCase__ : Optional[int] = config_command_parser()
UpperCAmelCase__ : Any = parser.parse_args()
config_command(lowerCAmelCase__ )
if __name__ == "__main__":
main()
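# Example usage (illustrative): run `accelerate config` to answer the prompts
# interactively, or pass `--config_file /path/to/default_config.yaml` to
# control where the answers are saved; a `.json` extension switches the output
# from YAML to JSON, as the branch above shows.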
| 75 | 1 |
'''simple docstring'''
def a__ ( lowerCAmelCase__ ) -> str:
# if the collection is empty, return an empty list
if collection == []:
return []
# get some information about the collection
UpperCAmelCase__ : Optional[int] = len(lowerCAmelCase__ )
UpperCAmelCase__ : int = max(lowerCAmelCase__ )
UpperCAmelCase__ : Optional[int] = min(lowerCAmelCase__ )
# create the counting array
UpperCAmelCase__ : Optional[Any] = coll_max + 1 - coll_min
UpperCAmelCase__ : Optional[int] = [0] * counting_arr_length
# count how many times each number appears in the collection
for number in collection:
counting_arr[number - coll_min] += 1
# sum each position with its predecessors. Now, counting_arr[i] tells
# us how many elements <= i there are in the collection
for i in range(1 , lowerCAmelCase__ ):
UpperCAmelCase__ : str = counting_arr[i] + counting_arr[i - 1]
# create the output collection
UpperCAmelCase__ : Tuple = [0] * coll_len
# place the elements in the output, respecting the original order (stable
# sort) from end to beginning, updating counting_arr
for i in reversed(range(0 , lowerCAmelCase__ ) ):
UpperCAmelCase__ : List[Any] = collection[i]
counting_arr[collection[i] - coll_min] -= 1
return ordered
def a__ ( lowerCAmelCase__ ) -> Dict:
return "".join([chr(lowerCAmelCase__ ) for i in counting_sort([ord(lowerCAmelCase__ ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string('''thisisthestring''') == "eghhiiinrsssttt"
UpperCamelCase__ = input('''Enter numbers separated by a comma:\n''').strip()
UpperCamelCase__ = [int(item) for item in user_input.split(''',''')]
print(counting_sort(unsorted))
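# Quick check (added; assumes the first a__ above keeps its original name
# counting_sort, as the __main__ block implies). Counting sort is stable and
# runs in O(n + k) time with k = max - min + 1:
assert counting_sort([0, 5, 3, 2, 2]) == [0, 2, 2, 3, 5]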
| 75 |
'''simple docstring'''
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int:
# Construct model
if gpta_config_file == "":
UpperCAmelCase__ : Optional[Any] = GPTaConfig()
else:
UpperCAmelCase__ : Tuple = GPTaConfig.from_json_file(lowerCAmelCase__ )
UpperCAmelCase__ : Dict = GPTaModel(lowerCAmelCase__ )
# Load weights from numpy
load_tf_weights_in_gpta(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save pytorch-model
UpperCAmelCase__ : Union[str, Any] = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
UpperCAmelCase__ : Optional[Any] = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
print(F"""Save PyTorch model to {pytorch_weights_dump_path}""" )
torch.save(model.state_dict() , lowerCAmelCase__ )
print(F"""Save configuration file to {pytorch_config_dump_path}""" )
with open(lowerCAmelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--gpt2_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--gpt2_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
'''This specifies the model architecture.'''
),
)
UpperCamelCase__ = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
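# Example invocation (illustrative; the script file name and all paths are
# assumptions, not taken from the source):
#   python convert_gpt2_checkpoint.py \
#     --gpt2_checkpoint_path /path/to/tf_checkpoint \
#     --pytorch_dump_folder_path /path/to/output_dir \
#     --gpt2_config_file /path/to/config.json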
| 75 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'''google/switch-base-8''': '''https://huggingface.co/google/switch-base-8/blob/main/config.json''',
}
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = 'switch_transformers'
lowerCAmelCase__ = ['past_key_values']
lowerCAmelCase__ = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__( self : Any , _A : List[Any]=32_128 , _A : Dict=768 , _A : List[str]=64 , _A : List[Any]=2_048 , _A : List[Any]=64 , _A : int=12 , _A : int=3 , _A : List[Any]=12 , _A : Any=3 , _A : Optional[Any]=12 , _A : Dict=8 , _A : int=False , _A : int=0.0_1 , _A : int="float32" , _A : Union[str, Any]=False , _A : Tuple=32 , _A : Any=128 , _A : str=0.1 , _A : List[Any]=1e-6 , _A : str=0.0_0_1 , _A : Optional[int]=0.0_0_1 , _A : Optional[int]=1.0 , _A : List[Any]="relu" , _A : List[str]=True , _A : Optional[int]=False , _A : int=True , _A : List[Any]=0 , _A : List[str]=1 , **_A : List[str] , ):
'''simple docstring'''
UpperCAmelCase__ : Dict = vocab_size
UpperCAmelCase__ : Union[str, Any] = d_model
UpperCAmelCase__ : List[Any] = d_kv
UpperCAmelCase__ : List[Any] = d_ff
UpperCAmelCase__ : Any = num_sparse_encoder_layers
UpperCAmelCase__ : List[str] = num_layers
UpperCAmelCase__ : Optional[Any] = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
UpperCAmelCase__ : List[str] = num_sparse_decoder_layers
# This tells us how many encoder layers apart each sparse layer is placed.
if self.num_sparse_encoder_layers > 0:
UpperCAmelCase__ : int = self.num_layers // self.num_sparse_encoder_layers
else:
UpperCAmelCase__ : Optional[int] = self.num_layers # HACK: this will create 0 sparse layers
# This tells us how many decoder layers apart each sparse layer is placed.
if self.num_sparse_decoder_layers > 0:
UpperCAmelCase__ : Optional[int] = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
UpperCAmelCase__ : List[str] = self.num_decoder_layers # HACK: this will create 0 sparse layers
UpperCAmelCase__ : Tuple = num_heads
UpperCAmelCase__ : Tuple = num_experts
UpperCAmelCase__ : Union[str, Any] = expert_capacity
UpperCAmelCase__ : str = router_bias
UpperCAmelCase__ : Any = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" )
UpperCAmelCase__ : Any = router_dtype
UpperCAmelCase__ : str = router_ignore_padding_tokens
UpperCAmelCase__ : str = relative_attention_num_buckets
UpperCAmelCase__ : str = relative_attention_max_distance
UpperCAmelCase__ : Optional[Any] = dropout_rate
UpperCAmelCase__ : Tuple = layer_norm_epsilon
UpperCAmelCase__ : Optional[int] = initializer_factor
UpperCAmelCase__ : int = feed_forward_proj
UpperCAmelCase__ : Optional[int] = use_cache
UpperCAmelCase__ : Optional[int] = add_router_probs
UpperCAmelCase__ : Any = router_z_loss_coef
UpperCAmelCase__ : Tuple = router_aux_loss_coef
UpperCAmelCase__ : List[str] = self.feed_forward_proj.split('''-''' )
UpperCAmelCase__ : Union[str, Any] = act_info[-1]
UpperCAmelCase__ : Any = act_info[0] == '''gated'''
if len(_A ) > 1 and act_info[0] != "gated" or len(_A ) > 2:
raise ValueError(
f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
UpperCAmelCase__ : List[Any] = '''gelu_new'''
super().__init__(
pad_token_id=_A , eos_token_id=_A , is_encoder_decoder=_A , **_A , )
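# Minimal sketch (added) of the sparse-step arithmetic above, using the
# signature defaults (num_layers=12, num_sparse_encoder_layers=3): every
# num_layers // num_sparse_encoder_layers == 4th encoder block becomes a
# sparse (mixture-of-experts) layer.
num_layers, num_sparse_encoder_layers = 12, 3
encoder_sparse_step = num_layers // num_sparse_encoder_layers
assert encoder_sparse_step == 4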
| 75 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCamelCase_ :
def __init__( self : Optional[int] , _A : Optional[Any] , _A : Tuple=2 , _A : Tuple=3 , _A : Optional[Any]=4 , _A : List[Any]=2 , _A : List[Any]=7 , _A : int=True , _A : Dict=True , _A : int=True , _A : Dict=True , _A : Tuple=99 , _A : Union[str, Any]=36 , _A : int=2 , _A : List[str]=4 , _A : int=37 , _A : List[Any]="gelu" , _A : str=0.1 , _A : str=0.1 , _A : Tuple=512 , _A : Dict=16 , _A : Tuple=2 , _A : Union[str, Any]=0.0_2 , _A : Any=6 , _A : Union[str, Any]=6 , _A : str=3 , _A : str=4 , _A : Tuple=None , _A : int=1_000 , ):
'''simple docstring'''
UpperCAmelCase__ : int = parent
UpperCAmelCase__ : Optional[int] = batch_size
UpperCAmelCase__ : str = num_channels
UpperCAmelCase__ : str = image_size
UpperCAmelCase__ : List[str] = patch_size
UpperCAmelCase__ : Any = is_training
UpperCAmelCase__ : List[str] = use_input_mask
UpperCAmelCase__ : Tuple = use_token_type_ids
UpperCAmelCase__ : str = use_labels
UpperCAmelCase__ : int = vocab_size
UpperCAmelCase__ : List[Any] = hidden_size
UpperCAmelCase__ : Optional[int] = num_hidden_layers
UpperCAmelCase__ : List[str] = num_attention_heads
UpperCAmelCase__ : Tuple = intermediate_size
UpperCAmelCase__ : Dict = hidden_act
UpperCAmelCase__ : int = hidden_dropout_prob
UpperCAmelCase__ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase__ : List[str] = max_position_embeddings
UpperCAmelCase__ : Tuple = type_vocab_size
UpperCAmelCase__ : Any = type_sequence_label_size
UpperCAmelCase__ : List[str] = initializer_range
UpperCAmelCase__ : List[str] = coordinate_size
UpperCAmelCase__ : Tuple = shape_size
UpperCAmelCase__ : Optional[int] = num_labels
UpperCAmelCase__ : Optional[Any] = num_choices
UpperCAmelCase__ : Union[str, Any] = scope
UpperCAmelCase__ : Optional[Any] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
UpperCAmelCase__ : str = text_seq_length
UpperCAmelCase__ : Tuple = (image_size // patch_size) ** 2 + 1
UpperCAmelCase__ : Tuple = self.text_seq_length + self.image_seq_length
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
UpperCAmelCase__ : int = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
UpperCAmelCase__ : str = bbox[i, j, 3]
UpperCAmelCase__ : Dict = bbox[i, j, 1]
UpperCAmelCase__ : str = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
UpperCAmelCase__ : Optional[int] = bbox[i, j, 2]
UpperCAmelCase__ : Any = bbox[i, j, 0]
UpperCAmelCase__ : List[Any] = tmp_coordinate
UpperCAmelCase__ : str = tf.constant(_A )
UpperCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : Any = None
if self.use_input_mask:
UpperCAmelCase__ : Any = random_attention_mask([self.batch_size, self.text_seq_length] )
UpperCAmelCase__ : Any = None
if self.use_token_type_ids:
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
UpperCAmelCase__ : Optional[int] = None
UpperCAmelCase__ : List[str] = None
if self.use_labels:
UpperCAmelCase__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
UpperCAmelCase__ : Optional[int] = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def lowercase_ ( self : Union[str, Any] , _A : int , _A : str , _A : Optional[int] , _A : Optional[int] , _A : List[str] , _A : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = TFLayoutLMvaModel(config=_A )
# text + image
UpperCAmelCase__ : Tuple = model(_A , pixel_values=_A , training=_A )
UpperCAmelCase__ : Tuple = model(
_A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , training=_A , )
UpperCAmelCase__ : Optional[Any] = model(_A , bbox=_A , pixel_values=_A , training=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
UpperCAmelCase__ : Any = model(_A , training=_A )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
UpperCAmelCase__ : str = model({'''pixel_values''': pixel_values} , training=_A )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def lowercase_ ( self : Union[str, Any] , _A : Optional[int] , _A : Optional[Any] , _A : Dict , _A : List[Any] , _A : List[Any] , _A : Any , _A : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.num_labels
UpperCAmelCase__ : int = TFLayoutLMvaForSequenceClassification(config=_A )
UpperCAmelCase__ : Union[str, Any] = model(
_A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , labels=_A , training=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ ( self : Dict , _A : List[Any] , _A : Any , _A : Dict , _A : str , _A : Optional[int] , _A : str , _A : str ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.num_labels
UpperCAmelCase__ : Union[str, Any] = TFLayoutLMvaForTokenClassification(config=_A )
UpperCAmelCase__ : Optional[int] = model(
_A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , labels=_A , training=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def lowercase_ ( self : Dict , _A : Dict , _A : List[str] , _A : Union[str, Any] , _A : int , _A : Tuple , _A : Dict , _A : str ):
'''simple docstring'''
UpperCAmelCase__ : str = 2
UpperCAmelCase__ : Dict = TFLayoutLMvaForQuestionAnswering(config=_A )
UpperCAmelCase__ : str = model(
_A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , training=_A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : int = self.prepare_config_and_inputs()
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : List[str] = config_and_inputs
UpperCAmelCase__ : List[Any] = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''pixel_values''': pixel_values,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_tf
class lowerCamelCase_ ( __a , __a , unittest.TestCase ):
lowerCAmelCase__ = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase__ = (
{'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def lowercase_ ( self : List[Any] , _A : Union[str, Any] , _A : str , _A : List[Any] , _A : Dict , _A : List[str] ):
'''simple docstring'''
return True
def lowercase_ ( self : Optional[Any] , _A : Tuple , _A : Any , _A : Dict=False ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = copy.deepcopy(_A )
if model_class in get_values(_A ):
UpperCAmelCase__ : Tuple = {
k: tf.tile(tf.expand_dims(_A , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(_A , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(_A ):
UpperCAmelCase__ : Dict = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(_A ):
UpperCAmelCase__ : Tuple = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
UpperCAmelCase__ : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(_A ):
UpperCAmelCase__ : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(_A ):
UpperCAmelCase__ : int = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Any = TFLayoutLMvaModelTester(self )
UpperCAmelCase__ : Tuple = ConfigTester(self , config_class=_A , hidden_size=37 )
def lowercase_ ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Optional[Any] = model_class(_A )
if getattr(_A , '''hf_compute_loss''' , _A ):
# The number of elements in the loss should be the same as the number of elements in the label
UpperCAmelCase__ : Tuple = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
UpperCAmelCase__ : List[Any] = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=_A )[0]
]
UpperCAmelCase__ : Optional[Any] = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
UpperCAmelCase__ : Any = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
UpperCAmelCase__ : Tuple = prepared_for_class.pop('''input_ids''' )
UpperCAmelCase__ : List[Any] = model(_A , **_A )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
UpperCAmelCase__ : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
UpperCAmelCase__ : Tuple = prepared_for_class.pop('''input_ids''' )
if "labels" in prepared_for_class:
UpperCAmelCase__ : Optional[Any] = prepared_for_class['''labels'''].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
UpperCAmelCase__ : Any = -100
UpperCAmelCase__ : Union[str, Any] = tf.convert_to_tensor(_A )
UpperCAmelCase__ : int = model(_A , **_A )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
UpperCAmelCase__ : Optional[int] = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
UpperCAmelCase__ : Dict = model(_A )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
UpperCAmelCase__ : Dict = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
# Get keys that were added with the _prepare_for_class function
UpperCAmelCase__ : Optional[int] = prepared_for_class.keys() - inputs_dict.keys()
UpperCAmelCase__ : int = inspect.signature(model.call ).parameters
UpperCAmelCase__ : Union[str, Any] = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
UpperCAmelCase__ : Dict = {0: '''input_ids'''}
for label_key in label_keys:
UpperCAmelCase__ : str = signature_names.index(_A )
UpperCAmelCase__ : List[Any] = label_key
UpperCAmelCase__ : Dict = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
UpperCAmelCase__ : Tuple = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
UpperCAmelCase__ : Any = prepared_for_class[value]
UpperCAmelCase__ : Tuple = tuple(_A )
# Send to model
UpperCAmelCase__ : Optional[Any] = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def lowercase_ ( self : int ):
'''simple docstring'''
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(_A , _A , _A , _A , _A , _A )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase__ : Union[str, Any] = type
self.model_tester.create_and_check_model(_A , _A , _A , _A , _A , _A )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
_A , _A , _A , _A , _A , _A , _A )
def lowercase_ ( self : Any ):
'''simple docstring'''
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
_A , _A , _A , _A , _A , _A , _A )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
_A , _A , _A , _A , _A , _A , _A )
@slow
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : List[str] = TFLayoutLMvaModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def a__ ( ) -> List[str]:
UpperCAmelCase__ : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
class lowerCamelCase_ ( unittest.TestCase ):
@cached_property
def lowercase_ ( self : Dict ):
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=_A ) if is_vision_available() else None
@slow
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : str = TFLayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' )
UpperCAmelCase__ : Dict = self.default_image_processor
UpperCAmelCase__ : Any = prepare_img()
UpperCAmelCase__ : int = image_processor(images=_A , return_tensors='''tf''' ).pixel_values
UpperCAmelCase__ : str = tf.constant([[1, 2]] )
UpperCAmelCase__ : Optional[Any] = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
UpperCAmelCase__ : int = model(input_ids=_A , bbox=_A , pixel_values=_A , training=_A )
# verify the logits
UpperCAmelCase__ : Optional[int] = (1, 199, 768)
self.assertEqual(outputs.last_hidden_state.shape , _A )
UpperCAmelCase__ : Dict = tf.constant(
[[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _A , atol=1e-4 ) )
| 75 | 1 |
'''simple docstring'''
from __future__ import annotations
from math import pi
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> dict[str, float]:
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if inductance < 0:
raise ValueError('''Inductance cannot be negative''' )
if frequency < 0:
raise ValueError('''Frequency cannot be negative''' )
if reactance < 0:
raise ValueError('''Inductive reactance cannot be negative''' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
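A readable reference sketch of the same reactance solver follows; the function and parameter names are illustrative, not the identifiers used in the sample. It solves X_L = 2*pi*f*L for whichever of the three quantities is passed as 0.

from math import pi

def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
    # Exactly one of the three quantities must be 0: that is the unknown.
    if (inductance, frequency, reactance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if inductance < 0 or frequency < 0 or reactance < 0:
        raise ValueError("All quantities must be non-negative")
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    if frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    return {"reactance": 2 * pi * frequency * inductance}

print(ind_reactance(0, 10.0, 50.0))  # {'inductance': 0.7957747154594768}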
| 75 |
'''simple docstring'''
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
UpperCamelCase__ = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: ''')))
print('''Googling.....''')
UpperCamelCase__ = F"""https://www.google.com/search?q={query}&num=100"""
UpperCamelCase__ = requests.get(
url,
headers={'''User-Agent''': str(UserAgent().random)},
)
try:
UpperCamelCase__ = (
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''yuRUbf'''})
.find('''a''')
.get('''href''')
)
except AttributeError:
UpperCamelCase__ = parse_qs(
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''kCrYT'''})
.find('''a''')
.get('''href''')
)['''url'''][0]
webbrowser.open(link)
| 75 | 1 |
'''simple docstring'''
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class lowerCamelCase_ ( unittest.TestCase ):
def lowercase_ ( self : int ):
'''simple docstring'''
debug_launcher(test_script.main )
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
debug_launcher(test_ops.main )
| 75 |
'''simple docstring'''
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> np.ndarray:
UpperCAmelCase__ : List[str] = cva.getAffineTransform(lowerCAmelCase__ , lowerCAmelCase__ )
return cva.warpAffine(lowerCAmelCase__ , lowerCAmelCase__ , (rows, cols) )
if __name__ == "__main__":
# read original image
UpperCamelCase__ = cva.imread(
str(Path(__file__).resolve().parent.parent / '''image_data''' / '''lena.jpg''')
)
# turn image in gray scale value
UpperCamelCase__ = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
# get image shape
UpperCamelCase__ , UpperCamelCase__ = gray_img.shape
# set different points to rotate image
UpperCamelCase__ = np.array([[5_0, 5_0], [2_0_0, 5_0], [5_0, 2_0_0]], np.floataa)
UpperCamelCase__ = np.array([[1_0, 1_0_0], [2_0_0, 5_0], [1_0_0, 2_5_0]], np.floataa)
UpperCamelCase__ = np.array([[5_0, 5_0], [1_5_0, 5_0], [1_2_0, 2_0_0]], np.floataa)
UpperCamelCase__ = np.array([[1_0, 1_0_0], [8_0, 5_0], [1_8_0, 2_5_0]], np.floataa)
# add all rotated images in a list
UpperCamelCase__ = [
gray_img,
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
]
# plot different image rotations
UpperCamelCase__ = plt.figure(1)
UpperCamelCase__ = ['''Original''', '''Rotation 1''', '''Rotation 2''', '''Rotation 3''']
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, '''gray''')
plt.title(titles[i])
plt.axis('''off''')
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
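A compact runnable sketch of the affine-warp step follows, assuming OpenCV (cv2) and NumPy are installed; the function name, point sets, and the synthetic input image are illustrative stand-ins for the fixture used above.

import cv2
import numpy as np

def affine_warp(img: np.ndarray, src_pts: np.ndarray, dst_pts: np.ndarray) -> np.ndarray:
    # An affine transform is fully determined by three point correspondences.
    rows, cols = img.shape[:2]
    matrix = cv2.getAffineTransform(src_pts, dst_pts)
    return cv2.warpAffine(img, matrix, (cols, rows))

gray = np.tile(np.arange(200, dtype=np.uint8), (200, 1))  # synthetic gradient image
src = np.float32([[50, 50], [200, 50], [50, 200]])
dst = np.float32([[10, 100], [200, 50], [100, 250]])
print(affine_warp(gray, src, dst).shape)  # (200, 200)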
| 75 | 1 |
'''simple docstring'''
import math
def a__ ( lowerCAmelCase__ ) -> list[int]:
UpperCAmelCase__ : List[Any] = []
UpperCAmelCase__ : Dict = 2
UpperCAmelCase__ : Optional[Any] = int(math.sqrt(lowerCAmelCase__ ) ) # Size of every segment
UpperCAmelCase__ : str = [True] * (end + 1)
UpperCAmelCase__ : Any = []
while start <= end:
if temp[start] is True:
in_prime.append(lowerCAmelCase__ )
for i in range(start * start , end + 1 , lowerCAmelCase__ ):
UpperCAmelCase__ : Dict = False
start += 1
prime += in_prime
UpperCAmelCase__ : Optional[int] = end + 1
UpperCAmelCase__ : str = min(2 * end , lowerCAmelCase__ )
while low <= n:
UpperCAmelCase__ : List[str] = [True] * (high - low + 1)
for each in in_prime:
UpperCAmelCase__ : List[str] = math.floor(low / each ) * each
if t < low:
t += each
for j in range(lowerCAmelCase__ , high + 1 , lowerCAmelCase__ ):
UpperCAmelCase__ : Union[str, Any] = False
for j in range(len(lowerCAmelCase__ ) ):
if temp[j] is True:
prime.append(j + low )
UpperCAmelCase__ : Union[str, Any] = high + 1
UpperCAmelCase__ : str = min(high + end , lowerCAmelCase__ )
return prime
print(sieve(1_0**6))
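The sample above is a segmented Sieve of Eratosthenes. A readable sketch of the same algorithm, with names chosen for clarity: base primes up to sqrt(n) are sieved first, then each window [low, high] is marked using only those base primes.

import math

def segmented_sieve(n: int) -> list[int]:
    limit = math.isqrt(n)
    # Plain sieve for the base primes up to sqrt(n).
    is_prime = [True] * (limit + 1)
    base_primes = []
    for p in range(2, limit + 1):
        if is_prime[p]:
            base_primes.append(p)
            for multiple in range(p * p, limit + 1, p):
                is_prime[multiple] = False
    primes = list(base_primes)
    # Sieve each window of width sqrt(n) using only the base primes.
    low, high = limit + 1, min(2 * limit, n)
    while low <= n:
        window = [True] * (high - low + 1)
        for p in base_primes:
            start = max(p * p, (low + p - 1) // p * p)  # first multiple of p in the window
            for multiple in range(start, high + 1, p):
                window[multiple - low] = False
        primes.extend(low + i for i, flag in enumerate(window) if flag)
        low, high = high + 1, min(high + limit, n)
    return primes

print(len(segmented_sieve(10**6)))  # 78498 primes below one million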
| 75 |
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
UpperCamelCase__ = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''feature request''',
'''new model''',
'''wip''',
]
def a__ ( ) -> List[str]:
UpperCAmelCase__ : int = Github(os.environ['''GITHUB_TOKEN'''] )
UpperCAmelCase__ : List[Any] = g.get_repo('''huggingface/transformers''' )
UpperCAmelCase__ : List[str] = repo.get_issues(state='''open''' )
for issue in open_issues:
UpperCAmelCase__ : List[Any] = sorted([comment for comment in issue.get_comments()] , key=lambda lowerCAmelCase__ : i.created_at , reverse=lowerCAmelCase__ )
UpperCAmelCase__ : Tuple = comments[0] if len(lowerCAmelCase__ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='''closed''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
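The stale-comment branch above hinges on simple date arithmetic. A minimal sketch of that predicate, with illustrative names and dates:

from datetime import datetime, timedelta

def is_stale(updated_at: datetime, created_at: datetime, now: datetime) -> bool:
    # Mirrors the bot's comment rule: no activity for more than 23 days
    # and the issue is at least 30 days old.
    return (now - updated_at).days > 23 and (now - created_at).days >= 30

now = datetime(2023, 6, 1)
print(is_stale(now - timedelta(days=40), now - timedelta(days=90), now))  # True
print(is_stale(now - timedelta(days=2), now - timedelta(days=90), now))   # False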
| 75 | 1 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'pytorch',
'script': 'run_ddp.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'tensorflow',
'script': 'run_tf_dist.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
},
] )
class lowerCamelCase_ ( unittest.TestCase ):
def lowercase_ ( self : List[str] ):
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=_A , )
assert hasattr(self , '''env''' )
def lowercase_ ( self : List[Any] , _A : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = f"""{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}"""
# distributed data settings
UpperCAmelCase__ : int = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=_A , instance_count=_A , instance_type=self.instance_type , debugger_hook_config=_A , hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=_A , py_version='''py36''' , )
def lowercase_ ( self : Optional[int] , _A : Any ):
'''simple docstring'''
TrainingJobAnalytics(_A ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(2,)] )
def lowercase_ ( self : Optional[int] , _A : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.create_estimator(_A )
# run training
estimator.fit()
# result dataframe
UpperCAmelCase__ : Union[str, Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
UpperCAmelCase__ : Dict = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
UpperCAmelCase__ : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
UpperCAmelCase__ : Any = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 999_999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , _A )
| 75 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCamelCase_ ( __a ):
def __init__( self : Dict , _A : List[str] , _A : int ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=_A , scheduler=_A )
@torch.no_grad()
def __call__( self : List[Any] , _A : int = 1 , _A : int = 100 , _A : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _A : Optional[float] = None , _A : bool = True , ):
'''simple docstring'''
if audio_length_in_s is None:
UpperCAmelCase__ : List[str] = self.unet.config.sample_size / self.unet.config.sample_rate
UpperCAmelCase__ : Union[str, Any] = audio_length_in_s * self.unet.config.sample_rate
UpperCAmelCase__ : List[Any] = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
f"""{audio_length_in_s} is too small. Make sure it's bigger or equal to"""
f""" {3 * down_scale_factor / self.unet.config.sample_rate}.""" )
UpperCAmelCase__ : List[Any] = int(_A )
if sample_size % down_scale_factor != 0:
UpperCAmelCase__ : int = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"""
f""" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"""
''' process.''' )
UpperCAmelCase__ : Dict = int(_A )
UpperCAmelCase__ : Optional[Any] = next(iter(self.unet.parameters() ) ).dtype
UpperCAmelCase__ : int = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(_A , _A ) and len(_A ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(_A )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
UpperCAmelCase__ : Optional[int] = randn_tensor(_A , generator=_A , device=self.device , dtype=_A )
# set step values
self.scheduler.set_timesteps(_A , device=audio.device )
UpperCAmelCase__ : List[str] = self.scheduler.timesteps.to(_A )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
UpperCAmelCase__ : Optional[int] = self.unet(_A , _A ).sample
# 2. compute previous image: x_t -> t_t-1
UpperCAmelCase__ : List[Any] = self.scheduler.step(_A , _A , _A ).prev_sample
UpperCAmelCase__ : Any = audio.clamp(-1 , 1 ).float().cpu().numpy()
UpperCAmelCase__ : Any = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=_A )
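If this is the audio diffusion pipeline it appears to be (diffusers' DanceDiffusionPipeline), a usage sketch consistent with the __call__ signature above would look like the following; the checkpoint id, output filename, and the scipy dependency are assumptions, not taken from the sample.

import torch
from diffusers import DanceDiffusionPipeline
from scipy.io.wavfile import write as write_wav

pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")  # example checkpoint
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")

output = pipe(audio_length_in_s=4.0, num_inference_steps=100)
audio = output.audios[0]  # numpy array, shape (channels, samples), values in [-1, 1]
write_wav("sample.wav", pipe.unet.config.sample_rate, audio.T)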
| 75 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase__ = {
'''configuration_longformer''': [
'''LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''LongformerConfig''',
'''LongformerOnnxConfig''',
],
'''tokenization_longformer''': ['''LongformerTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ['''LongformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'''LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongformerForMaskedLM''',
'''LongformerForMultipleChoice''',
'''LongformerForQuestionAnswering''',
'''LongformerForSequenceClassification''',
'''LongformerForTokenClassification''',
'''LongformerModel''',
'''LongformerPreTrainedModel''',
'''LongformerSelfAttention''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'''TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLongformerForMaskedLM''',
'''TFLongformerForMultipleChoice''',
'''TFLongformerForQuestionAnswering''',
'''TFLongformerForSequenceClassification''',
'''TFLongformerForTokenClassification''',
'''TFLongformerModel''',
'''TFLongformerPreTrainedModel''',
'''TFLongformerSelfAttention''',
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
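The import structure above defers heavy imports until first use. A minimal sketch of the same idea using PEP 562's module-level __getattr__, independent of transformers' internal _LazyModule; the attribute mapping below is illustrative.

import importlib

_LAZY_ATTRS = {
    "LongformerConfig": "transformers.models.longformer.configuration_longformer",
}

def __getattr__(name: str):
    # Called only when `name` is not already in the module namespace,
    # so the heavy import happens on first access.
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")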
| 75 |
'''simple docstring'''
from math import factorial
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> float:
if successes > trials:
raise ValueError('''successes must be lower or equal to trials''' )
if trials < 0 or successes < 0:
raise ValueError('''the function is defined for non-negative integers''' )
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
raise ValueError('''the function is defined for non-negative integers''' )
if not 0 < prob < 1:
raise ValueError('''prob has to be in range of 1 - 0''' )
UpperCAmelCase__ : Any = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
UpperCAmelCase__ : Any = float(factorial(lowerCAmelCase__ ) )
coefficient /= factorial(lowerCAmelCase__ ) * factorial(trials - successes )
return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print('''Probability of 2 successes out of 4 trails''')
print('''with probability of 0.75 is:''', end=''' ''')
print(binomial_distribution(2, 4, 0.75))
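A readable sketch of the same probability mass function, with math.comb supplying the binomial coefficient; names are chosen here for clarity.

from math import comb

def binomial_pmf(successes: int, trials: int, prob: float) -> float:
    # P(X = k) = C(n, k) * p^k * (1 - p)^(n - k)
    if not 0 <= successes <= trials:
        raise ValueError("successes must be between 0 and trials")
    if not 0 < prob < 1:
        raise ValueError("prob must be strictly between 0 and 1")
    return comb(trials, successes) * prob**successes * (1 - prob) ** (trials - successes)

print(binomial_pmf(2, 4, 0.75))  # 0.2109375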
| 75 | 1 |
'''simple docstring'''
from __future__ import annotations
class lowerCamelCase_ :
def __init__( self : int , _A : list[list[int]] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = TypeError(
'''Matrices must be formed from a list of zero or more lists containing at '''
'''least one and the same number of values, each of which must be of type '''
'''int or float.''' )
if len(_A ) != 0:
UpperCAmelCase__ : Any = len(rows[0] )
if cols == 0:
raise error
for row in rows:
if len(_A ) != cols:
raise error
for value in row:
if not isinstance(_A , (int, float) ):
raise error
UpperCAmelCase__ : Union[str, Any] = rows
else:
UpperCAmelCase__ : str = []
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
@property
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
return len(self.rows )
@property
def lowercase_ ( self : int ):
'''simple docstring'''
return len(self.rows[0] )
@property
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
return (self.num_rows, self.num_columns)
@property
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
return self.order[0] == self.order[1]
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : str = [
[0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
for row_num in range(self.num_rows )
]
return Matrix(_A )
def lowercase_ ( self : Any ):
'''simple docstring'''
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
def lowercase_ ( self : Dict ):
'''simple docstring'''
return bool(self.determinant() )
def lowercase_ ( self : int , _A : int , _A : int ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns )
if other_column != column
]
for other_row in range(self.num_rows )
if other_row != row
]
return Matrix(_A ).determinant()
def lowercase_ ( self : Union[str, Any] , _A : int , _A : int ):
'''simple docstring'''
if (row + column) % 2 == 0:
return self.get_minor(_A , _A )
return -1 * self.get_minor(_A , _A )
def lowercase_ ( self : int ):
'''simple docstring'''
return Matrix(
[
[self.get_minor(_A , _A ) for column in range(self.num_columns )]
for row in range(self.num_rows )
] )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = [
[self.cofactors().rows[column][row] for column in range(self.num_columns )]
for row in range(self.num_rows )
]
return Matrix(_A )
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.determinant()
if not determinant:
raise TypeError('''Only matrices with a non-zero determinant have an inverse''' )
return self.adjugate() * (1 / determinant)
def __repr__( self : int ):
'''simple docstring'''
return str(self.rows )
def __str__( self : Any ):
'''simple docstring'''
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(value ) for value in self.rows[0] ) + "]]"
return (
"["
+ "\n ".join(
[
'''[''' + '''. '''.join([str(value ) for value in row] ) + '''.]'''
for row in self.rows
] )
+ "]"
)
def lowercase_ ( self : Tuple , _A : list[int] , _A : int | None = None ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = TypeError('''Row must be a list containing all ints and/or floats''' )
if not isinstance(_A , _A ):
raise type_error
for value in row:
if not isinstance(_A , (int, float) ):
raise type_error
if len(_A ) != self.num_columns:
raise ValueError(
'''Row must be equal in length to the other rows in the matrix''' )
if position is None:
self.rows.append(_A )
else:
UpperCAmelCase__ : List[Any] = self.rows[0:position] + [row] + self.rows[position:]
def lowercase_ ( self : Any , _A : list[int] , _A : int | None = None ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = TypeError(
'''Column must be a list containing all ints and/or floats''' )
if not isinstance(_A , _A ):
raise type_error
for value in column:
if not isinstance(_A , (int, float) ):
raise type_error
if len(_A ) != self.num_rows:
raise ValueError(
'''Column must be equal in length to the other columns in the matrix''' )
if position is None:
UpperCAmelCase__ : Dict = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
else:
UpperCAmelCase__ : Union[str, Any] = [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows )
]
def __eq__( self : Optional[int] , _A : object ):
'''simple docstring'''
if not isinstance(_A , _A ):
return NotImplemented
return self.rows == other.rows
def __ne__( self : Optional[int] , _A : object ):
'''simple docstring'''
return not self == other
def __neg__( self : str ):
'''simple docstring'''
return self * -1
def __add__( self : Dict , _A : Matrix ):
'''simple docstring'''
if self.order != other.order:
raise ValueError('''Addition requires matrices of the same order''' )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __sub__( self : int , _A : Matrix ):
'''simple docstring'''
if self.order != other.order:
raise ValueError('''Subtraction requires matrices of the same order''' )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __mul__( self : Tuple , _A : Matrix | int | float ):
'''simple docstring'''
if isinstance(_A , (int, float) ):
return Matrix(
[[int(element * other ) for element in row] for row in self.rows] )
elif isinstance(_A , _A ):
if self.num_columns != other.num_rows:
raise ValueError(
'''The number of columns in the first matrix must '''
'''be equal to the number of rows in the second''' )
return Matrix(
[
[Matrix.dot_product(_A , _A ) for column in other.columns()]
for row in self.rows
] )
else:
raise TypeError(
'''A Matrix can only be multiplied by an int, float, or another matrix''' )
def __pow__( self : Tuple , _A : int ):
'''simple docstring'''
if not isinstance(_A , _A ):
raise TypeError('''A Matrix can only be raised to the power of an int''' )
if not self.is_square:
raise ValueError('''Only square matrices can be raised to a power''' )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
'''Only invertable matrices can be raised to a negative power''' )
UpperCAmelCase__ : List[str] = self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
def lowercase_ ( cls : str , _A : list[int] , _A : list[int] ):
'''simple docstring'''
return sum(row[i] * column[i] for i in range(len(_A ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
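The determinant/adjugate route the class takes is easiest to see for 2x2 matrices. A self-contained sketch with illustrative names:

def det2(m: list[list[float]]) -> float:
    return m[0][0] * m[1][1] - m[0][1] * m[1][0]

def inverse2(m: list[list[float]]) -> list[list[float]]:
    # Adjugate divided by the determinant, the same route the class above takes.
    d = det2(m)
    if d == 0:
        raise ValueError("singular matrix has no inverse")
    return [[m[1][1] / d, -m[0][1] / d], [-m[1][0] / d, m[0][0] / d]]

m = [[4.0, 7.0], [2.0, 6.0]]
print(det2(m))      # 10.0
print(inverse2(m))  # [[0.6, -0.7], [-0.2, 0.4]]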
| 75 |
'''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCamelCase__ = logging.get_logger(__name__)
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = ['pixel_values']
def __init__( self : List[str] , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : bool = True , _A : Dict[str, int] = None , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , _A : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **_A : int , ):
'''simple docstring'''
super().__init__(**_A )
UpperCAmelCase__ : Dict = size if size is not None else {'''shortest_edge''': 224}
UpperCAmelCase__ : int = get_size_dict(_A , default_to_square=_A )
UpperCAmelCase__ : Union[str, Any] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
UpperCAmelCase__ : List[str] = get_size_dict(_A , param_name='''crop_size''' )
UpperCAmelCase__ : str = do_resize
UpperCAmelCase__ : List[Any] = size
UpperCAmelCase__ : int = resample
UpperCAmelCase__ : int = do_center_crop
UpperCAmelCase__ : List[str] = crop_size
UpperCAmelCase__ : Union[str, Any] = do_rescale
UpperCAmelCase__ : Optional[int] = rescale_factor
UpperCAmelCase__ : List[Any] = do_normalize
UpperCAmelCase__ : Dict = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
UpperCAmelCase__ : Dict = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def lowercase_ ( self : str , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = get_size_dict(_A , default_to_square=_A )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
UpperCAmelCase__ : Tuple = int((256 / 224) * size['''shortest_edge'''] )
UpperCAmelCase__ : Tuple = get_resize_output_image_size(_A , size=_A , default_to_square=_A )
UpperCAmelCase__ : Dict = {'''height''': output_size[0], '''width''': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" )
return resize(
_A , size=(size_dict['''height'''], size_dict['''width''']) , resample=_A , data_format=_A , **_A )
def lowercase_ ( self : Optional[Any] , _A : np.ndarray , _A : Dict[str, int] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Optional[int] , ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(_A , size=(size['''height'''], size['''width''']) , data_format=_A , **_A )
def lowercase_ ( self : List[str] , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Dict , ):
'''simple docstring'''
return rescale(_A , scale=_A , data_format=_A , **_A )
def lowercase_ ( self : Dict , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Optional[int] , ):
'''simple docstring'''
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def lowercase_ ( self : Optional[Any] , _A : ImageInput , _A : Optional[bool] = None , _A : Optional[Dict[str, int]] = None , _A : PILImageResampling = None , _A : Optional[bool] = None , _A : Optional[Dict[str, int]] = None , _A : Optional[bool] = None , _A : Optional[float] = None , _A : Optional[bool] = None , _A : Optional[Union[float, Iterable[float]]] = None , _A : Optional[Union[float, Iterable[float]]] = None , _A : Optional[TensorType] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : str , ):
'''simple docstring'''
UpperCAmelCase__ : str = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase__ : Optional[int] = resample if resample is not None else self.resample
UpperCAmelCase__ : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase__ : Tuple = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase__ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase__ : List[str] = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase__ : Tuple = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase__ : List[str] = image_std if image_std is not None else self.image_std
UpperCAmelCase__ : Tuple = size if size is not None else self.size
UpperCAmelCase__ : int = get_size_dict(_A , default_to_square=_A )
UpperCAmelCase__ : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase__ : int = get_size_dict(_A , param_name='''crop_size''' )
UpperCAmelCase__ : Union[str, Any] = make_list_of_images(_A )
if not valid_images(_A ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
UpperCAmelCase__ : int = [to_numpy_array(_A ) for image in images]
if do_resize:
UpperCAmelCase__ : str = [self.resize(_A , _A , _A ) for image in images]
if do_center_crop:
UpperCAmelCase__ : Tuple = [self.center_crop(_A , _A ) for image in images]
if do_rescale:
UpperCAmelCase__ : Optional[int] = [self.rescale(_A , _A ) for image in images]
if do_normalize:
UpperCAmelCase__ : Any = [self.normalize(_A , _A , _A ) for image in images]
UpperCAmelCase__ : Tuple = [to_channel_dimension_format(_A , _A ) for image in images]
UpperCAmelCase__ : Dict = {'''pixel_values''': images}
return BatchFeature(data=_A , tensor_type=_A )
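In the shortest_edge branch of resize above, the requested edge (default 224) is first inflated by 256/224, the image is resized so its short side matches, and the later center crop brings it back to 224x224. A small sketch of the resize arithmetic, with hypothetical names:

def shortest_edge_resize(height: int, width: int, target: int) -> tuple[int, int]:
    # Scale so the shorter side becomes `target`, preserving aspect ratio.
    scale = target / min(height, width)
    return round(height * scale), round(width * scale)

# A requested shortest_edge of 224 is inflated to 256 before resizing,
# leaving a margin for the subsequent 224x224 center crop.
print(shortest_edge_resize(480, 640, 256))  # (256, 341)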
| 75 | 1 |
'''simple docstring'''
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]:
UpperCAmelCase__ : Any = multiprocessing.Manager()
UpperCAmelCase__ : Optional[Any] = manager.list()
UpperCAmelCase__ : List[Any] = multiprocessing.Process(target=lowerCAmelCase__ , args=(check_program, result, timeout) )
p.start()
p.join(timeout=timeout + 1 )
if p.is_alive():
p.kill()
if not result:
result.append('''timed out''' )
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int:
with create_tempdir():
# These system calls are needed when cleaning up tempdir.
import os
import shutil
UpperCAmelCase__ : Any = shutil.rmtree
UpperCAmelCase__ : List[Any] = os.rmdir
UpperCAmelCase__ : Union[str, Any] = os.chdir
# Disable functionalities that can make destructive changes to the test.
reliability_guard()
# Run program.
try:
UpperCAmelCase__ : Union[str, Any] = {}
with swallow_io():
with time_limit(lowerCAmelCase__ ):
exec(lowerCAmelCase__ , lowerCAmelCase__ )
result.append('''passed''' )
except TimeoutException:
result.append('''timed out''' )
except BaseException as e:
result.append(F"""failed: {e}""" )
# Needed for cleaning up.
UpperCAmelCase__ : Dict = rmtree
UpperCAmelCase__ : str = rmdir
UpperCAmelCase__ : str = chdir
@contextlib.contextmanager
def a__ ( lowerCAmelCase__ ) -> Optional[int]:
def signal_handler(lowerCAmelCase__ , lowerCAmelCase__ ):
raise TimeoutException('''Timed out!''' )
signal.setitimer(signal.ITIMER_REAL , lowerCAmelCase__ )
signal.signal(signal.SIGALRM , lowerCAmelCase__ )
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
@contextlib.contextmanager
def a__ ( ) -> List[Any]:
UpperCAmelCase__ : Union[str, Any] = WriteOnlyStringIO()
with contextlib.redirect_stdout(lowerCAmelCase__ ):
with contextlib.redirect_stderr(lowerCAmelCase__ ):
with redirect_stdin(lowerCAmelCase__ ):
yield
@contextlib.contextmanager
def a__ ( ) -> List[str]:
with tempfile.TemporaryDirectory() as dirname:
with chdir(lowerCAmelCase__ ):
yield dirname
class lowerCamelCase_ ( __a ):
pass
class lowerCamelCase_ ( io.StringIO ):
def lowercase_ ( self : List[str] , *_A : Optional[Any] , **_A : int ):
'''simple docstring'''
raise OSError
def lowercase_ ( self : Any , *_A : Tuple , **_A : List[str] ):
'''simple docstring'''
raise OSError
def lowercase_ ( self : Optional[Any] , *_A : List[str] , **_A : Union[str, Any] ):
'''simple docstring'''
raise OSError
def lowercase_ ( self : str , *_A : Dict , **_A : Optional[Any] ):
'''simple docstring'''
return False
class lowerCamelCase_ ( contextlib._RedirectStream ): # type: ignore
lowerCAmelCase__ = 'stdin'
@contextlib.contextmanager
def a__ ( lowerCAmelCase__ ) -> Union[str, Any]:
if root == ".":
yield
return
UpperCAmelCase__ : Union[str, Any] = os.getcwd()
os.chdir(lowerCAmelCase__ )
try:
yield
except BaseException as exc:
raise exc
finally:
os.chdir(lowerCAmelCase__ )
def a__ ( lowerCAmelCase__=None ) -> List[Any]:
if maximum_memory_bytes is not None:
import resource
resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
faulthandler.disable()
import builtins
UpperCAmelCase__ : int = None
UpperCAmelCase__ : int = None
import os
UpperCAmelCase__ : Dict = '''1'''
UpperCAmelCase__ : Union[str, Any] = None
UpperCAmelCase__ : List[str] = None
UpperCAmelCase__ : List[Any] = None
UpperCAmelCase__ : Tuple = None
UpperCAmelCase__ : List[str] = None
UpperCAmelCase__ : Union[str, Any] = None
UpperCAmelCase__ : str = None
UpperCAmelCase__ : Dict = None
UpperCAmelCase__ : Any = None
UpperCAmelCase__ : int = None
UpperCAmelCase__ : List[str] = None
UpperCAmelCase__ : int = None
UpperCAmelCase__ : Union[str, Any] = None
UpperCAmelCase__ : str = None
UpperCAmelCase__ : Union[str, Any] = None
UpperCAmelCase__ : Union[str, Any] = None
UpperCAmelCase__ : Optional[int] = None
UpperCAmelCase__ : Any = None
UpperCAmelCase__ : str = None
UpperCAmelCase__ : int = None
UpperCAmelCase__ : Union[str, Any] = None
UpperCAmelCase__ : Any = None
UpperCAmelCase__ : Union[str, Any] = None
UpperCAmelCase__ : Dict = None
UpperCAmelCase__ : Any = None
UpperCAmelCase__ : List[Any] = None
UpperCAmelCase__ : Union[str, Any] = None
import shutil
UpperCAmelCase__ : Optional[int] = None
UpperCAmelCase__ : Any = None
UpperCAmelCase__ : Union[str, Any] = None
import subprocess
UpperCAmelCase__ : str = None # type: ignore
UpperCAmelCase__ : Optional[Any] = None
import sys
UpperCAmelCase__ : List[Any] = None
UpperCAmelCase__ : int = None
UpperCAmelCase__ : Optional[Any] = None
UpperCAmelCase__ : Optional[int] = None
UpperCAmelCase__ : Optional[Any] = None
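The sandbox above enforces its timeout with a SIGALRM timer. A self-contained, POSIX-only sketch of that mechanism, with illustrative names:

import contextlib
import signal

class TimedOut(Exception):
    pass

@contextlib.contextmanager
def time_limit(seconds: float):
    def handler(signum, frame):
        raise TimedOut()
    signal.signal(signal.SIGALRM, handler)         # install the alarm handler
    signal.setitimer(signal.ITIMER_REAL, seconds)  # arm the timer
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)    # always disarm

try:
    with time_limit(0.1):
        exec("while True: pass", {})
except TimedOut:
    print("timed out")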
| 75 |
'''simple docstring'''
import math
def a__ ( ) -> None:
UpperCAmelCase__ : List[str] = input('''Enter message: ''' )
UpperCAmelCase__ : Any = int(input(F"""Enter key [2-{len(lowerCAmelCase__ ) - 1}]: """ ) )
UpperCAmelCase__ : List[str] = input('''Encryption/Decryption [e/d]: ''' )
if mode.lower().startswith('''e''' ):
UpperCAmelCase__ : Dict = encrypt_message(lowerCAmelCase__ , lowerCAmelCase__ )
elif mode.lower().startswith('''d''' ):
UpperCAmelCase__ : Optional[int] = decrypt_message(lowerCAmelCase__ , lowerCAmelCase__ )
# Append pipe symbol (vertical bar) to identify spaces at the end.
print(F"""Output:\n{text + "|"}""" )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
UpperCAmelCase__ : Optional[int] = [''''''] * key
for col in range(lowerCAmelCase__ ):
UpperCAmelCase__ : Tuple = col
while pointer < len(lowerCAmelCase__ ):
cipher_text[col] += message[pointer]
pointer += key
return "".join(lowerCAmelCase__ )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
UpperCAmelCase__ : int = math.ceil(len(lowerCAmelCase__ ) / key )
UpperCAmelCase__ : Any = key
UpperCAmelCase__ : Optional[int] = (num_cols * num_rows) - len(lowerCAmelCase__ )
UpperCAmelCase__ : List[Any] = [''''''] * num_cols
UpperCAmelCase__ : List[str] = 0
UpperCAmelCase__ : List[Any] = 0
for symbol in message:
plain_text[col] += symbol
col += 1
if (
(col == num_cols)
or (col == num_cols - 1)
and (row >= num_rows - num_shaded_boxes)
):
UpperCAmelCase__ : Optional[int] = 0
row += 1
return "".join(lowerCAmelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
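A round-trip sketch of the same columnar transposition, with descriptive names; encrypt reproduces the column-stride layout above, and decrypt rebuilds the grid row widths before reading it back out column by column.

import math
from itertools import zip_longest

def encrypt(message: str, key: int) -> str:
    # Column c of the grid collects characters c, c + key, c + 2*key, ...
    return "".join(message[c::key] for c in range(key))

def decrypt(cipher: str, key: int) -> str:
    num_cols = math.ceil(len(cipher) / key)
    num_shaded = num_cols * key - len(cipher)
    rows, i = [], 0
    for r in range(key):
        # The last `num_shaded` rows of the encryption grid were one short.
        width = num_cols - (1 if r >= key - num_shaded else 0)
        rows.append(cipher[i : i + width])
        i += width
    return "".join("".join(t) for t in zip_longest(*rows, fillvalue=""))

msg = "Common sense is not so common."
assert decrypt(encrypt(msg, 8), 8) == msg
print(encrypt(msg, 8))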
| 75 | 1 |
'''simple docstring'''
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def a__ ( lowerCAmelCase__ = "isbn/0140328726" ) -> dict:
UpperCAmelCase__ : str = olid.strip().strip('''/''' ) # Remove leading/trailing whitespace & slashes
if new_olid.count('''/''' ) != 1:
UpperCAmelCase__ : int = F"""{olid} is not a valid Open Library olid"""
raise ValueError(lowerCAmelCase__ )
return requests.get(F"""https://openlibrary.org/{new_olid}.json""" ).json()
def a__ ( lowerCAmelCase__ ) -> dict:
UpperCAmelCase__ : Tuple = {
'''title''': '''Title''',
'''publish_date''': '''Publish date''',
'''authors''': '''Authors''',
'''number_of_pages''': '''Number of pages:''',
'''first_sentence''': '''First sentence''',
'''isbn_10''': '''ISBN (10)''',
'''isbn_13''': '''ISBN (13)''',
}
UpperCAmelCase__ : int = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
UpperCAmelCase__ : Any = [
get_openlibrary_data(author['''key'''] )['''name'''] for author in data['''Authors''']
]
UpperCAmelCase__ : Dict = data['''First sentence''']['''value''']
for key, value in data.items():
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase__ : Union[str, Any] = ''', '''.join(lowerCAmelCase__ )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
UpperCamelCase__ = input('''\nEnter the ISBN code to search (or \'quit\' to stop): ''').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (1_0, 1_3) or not isbn.isdigit():
print(F"""Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.""")
continue
print(F"""\nSearching Open Library for ISBN: {isbn}...\n""")
try:
UpperCamelCase__ = summarize_book(get_openlibrary_data(F"""isbn/{isbn}"""))
print('''\n'''.join(F"""{key}: {value}""" for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F"""Sorry, there are no results for ISBN: {isbn}.""")
| 75 |
'''simple docstring'''
class lowerCamelCase_ :
def __init__( self : str , _A : Union[str, Any] , _A : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = name
UpperCAmelCase__ : Union[str, Any] = val
def __str__( self : Tuple ):
'''simple docstring'''
return f"""{self.__class__.__name__}({self.name}, {self.val})"""
def __lt__( self : Union[str, Any] , _A : Dict ):
'''simple docstring'''
return self.val < other.val
class lowerCamelCase_ :
def __init__( self : int , _A : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = {}
UpperCAmelCase__ : int = {}
UpperCAmelCase__ : Any = self.build_heap(_A )
def __getitem__( self : Any , _A : Any ):
'''simple docstring'''
return self.get_value(_A )
def lowercase_ ( self : Any , _A : List[Any] ):
'''simple docstring'''
return (idx - 1) // 2
def lowercase_ ( self : Union[str, Any] , _A : Optional[int] ):
'''simple docstring'''
return idx * 2 + 1
def lowercase_ ( self : Tuple , _A : List[Any] ):
'''simple docstring'''
return idx * 2 + 2
def lowercase_ ( self : List[str] , _A : Tuple ):
'''simple docstring'''
return self.heap_dict[key]
def lowercase_ ( self : str , _A : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Any = len(_A ) - 1
UpperCAmelCase__ : Tuple = self.get_parent_idx(_A )
for idx, i in enumerate(_A ):
UpperCAmelCase__ : Dict = idx
UpperCAmelCase__ : Optional[Any] = i.val
for i in range(_A , -1 , -1 ):
self.sift_down(_A , _A )
return array
def lowercase_ ( self : Optional[Any] , _A : str , _A : List[Any] ):
'''simple docstring'''
while True:
UpperCAmelCase__ : Any = self.get_left_child_idx(_A ) # noqa: E741
UpperCAmelCase__ : Optional[Any] = self.get_right_child_idx(_A )
UpperCAmelCase__ : Tuple = idx
if l < len(_A ) and array[l] < array[idx]:
UpperCAmelCase__ : int = l
if r < len(_A ) and array[r] < array[smallest]:
UpperCAmelCase__ : Dict = r
if smallest != idx:
UpperCAmelCase__ , UpperCAmelCase__ = array[smallest], array[idx]
UpperCAmelCase__ , UpperCAmelCase__ = (
self.idx_of_element[array[smallest]],
self.idx_of_element[array[idx]],
)
UpperCAmelCase__ : str = smallest
else:
break
def lowercase_ ( self : List[str] , _A : int ):
'''simple docstring'''
UpperCAmelCase__ : str = self.get_parent_idx(_A )
while p >= 0 and self.heap[p] > self.heap[idx]:
UpperCAmelCase__ , UpperCAmelCase__ = self.heap[idx], self.heap[p]
UpperCAmelCase__ , UpperCAmelCase__ = (
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
UpperCAmelCase__ : Union[str, Any] = p
UpperCAmelCase__ : List[Any] = self.get_parent_idx(_A )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
return self.heap[0]
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ = self.heap[-1], self.heap[0]
UpperCAmelCase__ , UpperCAmelCase__ = (
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
UpperCAmelCase__ : int = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0 , self.heap )
return x
def lowercase_ ( self : int , _A : Union[str, Any] ):
'''simple docstring'''
self.heap.append(_A )
UpperCAmelCase__ : Union[str, Any] = len(self.heap ) - 1
UpperCAmelCase__ : Optional[Any] = node.val
self.sift_up(len(self.heap ) - 1 )
def lowercase_ ( self : str ):
'''simple docstring'''
return len(self.heap ) == 0
def lowercase_ ( self : int , _A : Optional[Any] , _A : str ):
'''simple docstring'''
assert (
self.heap[self.idx_of_element[node]].val > new_value
), "newValue must be less than current value"
UpperCAmelCase__ : Optional[Any] = new_value
UpperCAmelCase__ : List[str] = new_value
self.sift_up(self.idx_of_element[node] )
UpperCamelCase__ = Node('''R''', -1)
UpperCamelCase__ = Node('''B''', 6)
UpperCamelCase__ = Node('''A''', 3)
UpperCamelCase__ = Node('''X''', 1)
UpperCamelCase__ = Node('''E''', 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
UpperCamelCase__ = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('''Min Heap - before decrease key''')
for i in my_min_heap.heap:
print(i)
print('''Min Heap - After decrease key of node [B -> -17]''')
my_min_heap.decrease_key(b, -1_7)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
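The decrease_key/sift_up pair above reduces to a classic array operation: lower the stored value, then bubble it up past larger parents. A minimal sketch on a plain list heap:

def sift_up(heap: list[int], idx: int) -> None:
    # Restore the min-heap property after heap[idx] decreased.
    while idx > 0 and heap[(idx - 1) // 2] > heap[idx]:
        parent = (idx - 1) // 2
        heap[idx], heap[parent] = heap[parent], heap[idx]
        idx = parent

heap = [1, 3, 4, 6]  # already a valid min-heap
heap[3] = -17        # decrease the key stored at index 3
sift_up(heap, 3)
print(heap)          # [-17, 1, 4, 3]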
| 75 | 1 |
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.17.0.dev0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''')
UpperCamelCase__ = logging.getLogger(__name__)
@dataclass
class lowerCamelCase_ :
lowerCAmelCase__ = field(
default='tab_fact' , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
lowerCAmelCase__ = field(
default='tab_fact' , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} , )
lowerCAmelCase__ = field(
default=1_0_2_4 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase__ = field(
default=__a , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
lowerCAmelCase__ = field(
default=__a , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
lowerCAmelCase__ = field(
default=__a , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
lowerCAmelCase__ = field(
default=__a , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
lowerCAmelCase__ = field(
default=__a , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of prediction examples to this '
'value if set.'
)
} , )
lowerCAmelCase__ = field(
default=__a , metadata={'help': 'A csv or a json file containing the training data.'} )
lowerCAmelCase__ = field(
default=__a , metadata={'help': 'A csv or a json file containing the validation data.'} )
lowerCAmelCase__ = field(default=__a , metadata={'help': 'A csv or a json file containing the test data.'} )
def lowercase_ ( self : Dict ):
'''simple docstring'''
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('''Need either a GLUE task, a training/validation file or a dataset name.''' )
else:
UpperCAmelCase__ : List[Any] = self.train_file.split('''.''' )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
UpperCAmelCase__ : Union[str, Any] = self.validation_file.split('''.''' )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class lowerCamelCase_ :
lowerCAmelCase__ = field(
default=__a , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
lowerCAmelCase__ = field(
default=__a , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowerCAmelCase__ = field(
default=__a , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
lowerCAmelCase__ = field(
default=__a , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
lowerCAmelCase__ = field(
default=__a , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
lowerCAmelCase__ = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
lowerCAmelCase__ = field(
default=__a , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
def a__ ( ) -> Any:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCAmelCase__ : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
UpperCAmelCase__ : List[str] = training_args.get_process_log_level()
logger.setLevel(lowerCAmelCase__ )
datasets.utils.logging.set_verbosity(lowerCAmelCase__ )
transformers.utils.logging.set_verbosity(lowerCAmelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
UpperCAmelCase__ : int = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCAmelCase__ : Optional[int] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
UpperCAmelCase__ : Dict = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
UpperCAmelCase__ : Optional[Any] = {'''train''': data_args.train_file, '''validation''': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
UpperCAmelCase__ : Dict = data_args.train_file.split('''.''' )[-1]
UpperCAmelCase__ : Any = data_args.test_file.split('''.''' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
UpperCAmelCase__ : int = data_args.test_file
else:
raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' )
for key in data_files.keys():
logger.info(F"""load a local file for {key}: {data_files[key]}""" )
if data_args.train_file.endswith('''.csv''' ):
# Loading a dataset from local csv files
UpperCAmelCase__ : Any = load_dataset('''csv''' , data_files=lowerCAmelCase__ , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
UpperCAmelCase__ : Any = load_dataset('''json''' , data_files=lowerCAmelCase__ , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
UpperCAmelCase__ : Optional[int] = raw_datasets['''train'''].features['''label'''].names
UpperCAmelCase__ : Tuple = len(lowerCAmelCase__ )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCAmelCase__ : Tuple = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCAmelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
UpperCAmelCase__ : Optional[int] = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=lowerCAmelCase__ , )
UpperCAmelCase__ : Any = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=lowerCAmelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
UpperCAmelCase__ : List[Any] = '''max_length'''
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
UpperCAmelCase__ : List[Any] = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
UpperCAmelCase__ : Dict = {'''Refused''': 0, '''Entailed''': 1}
UpperCAmelCase__ : str = {0: '''Refused''', 1: '''Entailed'''}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
UpperCAmelCase__ : List[str] = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(examples ):
# Tokenize the texts
def _convert_table_text_to_pandas(_table_text ):
UpperCAmelCase__ : Any = [_table_row.split('''#''' ) for _table_row in _table_text.strip('''\n''' ).split('''\n''' )]
UpperCAmelCase__ : Optional[Any] = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
UpperCAmelCase__ : int = examples['''statement''']
UpperCAmelCase__ : Dict = list(map(_convert_table_text_to_pandas , examples['''table_text'''] ) )
UpperCAmelCase__ : str = tokenizer(lowerCAmelCase__ , lowerCAmelCase__ , padding=lowerCAmelCase__ , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ )
UpperCAmelCase__ : Any = examples['''label''']
return result
with training_args.main_process_first(desc='''dataset map pre-processing''' ):
UpperCAmelCase__ : int = raw_datasets.map(
lowerCAmelCase__ , batched=lowerCAmelCase__ , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on dataset''' , )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError('''--do_train requires a train dataset''' )
        train_dataset = raw_datasets['''train''']
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples ) )
    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError('''--do_eval requires a validation dataset''' )
        eval_dataset = raw_datasets['''validation''']
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples ) )
    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError('''--do_predict requires a test dataset''' )
        predict_dataset = raw_datasets['''test''']
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples ) )
    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset ) ) , 3 ):
            logger.info(F"""Sample {index} of the training set: {train_dataset[index]}.""" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p ):
        preds = p.predictions[0] if isinstance(p.predictions , tuple ) else p.predictions
        preds = np.argmax(preds , axis=1 )
        return {"accuracy": (preds == p.label_ids).astype(np.float32 ).mean().item()}
    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 )
    else:
        data_collator = None
# Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=tokenizer , data_collator=data_collator , )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics['''train_samples'''] = min(max_train_samples , len(train_dataset ) )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics('''train''' , metrics )
        trainer.save_metrics('''train''' , metrics )
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        metrics = trainer.evaluate(eval_dataset=eval_dataset )
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
        metrics['''eval_samples'''] = min(max_eval_samples , len(eval_dataset ) )
        trainer.log_metrics('''eval''' , metrics )
        trainer.save_metrics('''eval''' , metrics )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns('''label''' )
        predictions = trainer.predict(predict_dataset , metric_key_prefix='''predict''' ).predictions
        predictions = np.argmax(predictions , axis=1 )
        output_predict_file = os.path.join(training_args.output_dir , '''predict_results_tabfact.txt''' )
        if trainer.is_world_process_zero():
            with open(output_predict_file , '''w''' ) as writer:
                logger.info('''***** Predict Results *****''' )
                writer.write('''index\tprediction\n''' )
                for index, item in enumerate(predictions ):
                    item = label_list[item]
                    writer.write(F"""{index}\t{item}\n""" )
    kwargs = {'''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''text-classification'''}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
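For reference, the `#`-delimited `table_text` format that `_convert_table_text_to_pandas` parses above can be exercised on its own; the two-row table below is made up for illustration and is not from the TabFact data.

import pandas as pd

# Hypothetical `table_text` value: rows separated by newlines, cells by '#'.
sample_table_text = 'city#population\nparis#2161000\nberlin#3645000'
rows = [line.split('#') for line in sample_table_text.strip('\n').split('\n')]
table = pd.DataFrame.from_records(rows[1:], columns=rows[0])
print(table.columns.tolist())  # ['city', 'population']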
| 75 |
'''simple docstring'''
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
UpperCamelCase__ = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
UpperCamelCase__ = direct_transformers_import(PATH_TO_TRANSFORMERS)
UpperCamelCase__ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
UpperCamelCase__ = re.compile(R'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''')
UpperCamelCase__ = {
'''DecisionTransformerConfig''',
'''EncoderDecoderConfig''',
'''MusicgenConfig''',
'''RagConfig''',
'''SpeechEncoderDecoderConfig''',
'''TimmBackboneConfig''',
'''VisionEncoderDecoderConfig''',
'''VisionTextDualEncoderConfig''',
'''LlamaConfig''',
}
def get_checkpoint_from_config_class(config_class ):
    checkpoint = None
    # source code of `config_class`
    config_source = inspect.getsource(config_class )
    checkpoints = _re_checkpoint.findall(config_source )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
        if ckpt_link.endswith('''/''' ):
            ckpt_link = ckpt_link[:-1]
        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = F"""https://huggingface.co/{ckpt_name}"""
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
break
return checkpoint
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values() ):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class )
        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name )
    if len(configs_without_checkpoint ) > 0:
        message = '''\n'''.join(sorted(configs_without_checkpoint ) )
        raise ValueError(F"""The following configurations don't contain any valid checkpoint:\n{message}""" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
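As a quick illustration, the checkpoint-link pattern can be exercised on a made-up docstring line; the model name and URL below are examples only.

import re

_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
docstring_line = "Instantiating a configuration with the defaults of [bert-base-uncased](https://huggingface.co/bert-base-uncased)."
for ckpt_name, ckpt_link in _re_checkpoint.findall(docstring_line):
    # The check above passes when the link is exactly the hub URL built from the name.
    print(ckpt_name, ckpt_link == f"https://huggingface.co/{ckpt_name}")  # bert-base-uncased True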
| 75 | 1 |
'''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class TextInpainting( DiffusionPipeline ):
    def __init__( self , segmentation_model : CLIPSegForImageSegmentation , segmentation_processor : CLIPSegProcessor , vae : AutoencoderKL , text_encoder : CLIPTextModel , tokenizer : CLIPTokenizer , unet : UNet2DConditionModel , scheduler : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , safety_checker : StableDiffusionSafetyChecker , feature_extractor : CLIPImageProcessor , ):
        super().__init__()
        if hasattr(scheduler.config , '''steps_offset''' ) and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
                f""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
                '''to update the config accordingly as leaving `steps_offset` might led to incorrect results'''
                ''' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'''
                ''' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'''
                ''' file'''
            )
            deprecate('''steps_offset!=1''' , '''1.0.0''' , deprecation_message , standard_warn=False )
            new_config = dict(scheduler.config )
            new_config['''steps_offset'''] = 1
            scheduler._internal_dict = FrozenDict(new_config )
        if hasattr(scheduler.config , '''skip_prk_steps''' ) and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
                ''' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'''
                ''' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'''
                ''' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'''
                ''' Hub, it would be very nice if you could open a Pull request for the'''
                ''' `scheduler/scheduler_config.json` file'''
            )
            deprecate('''skip_prk_steps not set''' , '''1.0.0''' , deprecation_message , standard_warn=False )
            new_config = dict(scheduler.config )
            new_config['''skip_prk_steps'''] = True
            scheduler._internal_dict = FrozenDict(new_config )
if safety_checker is None:
logger.warning(
f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
        self.register_modules(
            segmentation_model=segmentation_model , segmentation_processor=segmentation_processor , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , )
    def enable_attention_slicing( self , slice_size : Optional[Union[str, int]] = "auto" ):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )
    def disable_attention_slicing( self ):
        self.enable_attention_slicing(None )
    def enable_sequential_cpu_offload( self ):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('''Please install accelerate via `pip install accelerate`''' )
        device = torch.device('''cuda''' )
        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device( self ):
        if self.device != torch.device('''meta''' ) or not hasattr(self.unet , '''_hf_hook''' ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module , '''_hf_hook''' )
                and hasattr(module._hf_hook , '''execution_device''' )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
    @torch.no_grad()
    def __call__( self , prompt : Union[str, List[str]] , image : Union[torch.FloatTensor, PIL.Image.Image] , text : str , height : int = 512 , width : int = 512 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs , ):
        inputs = self.segmentation_processor(
            text=[text] , images=[image] , padding='''max_length''' , return_tensors='''pt''' ).to(self.device )
        outputs = self.segmentation_model(**inputs )
        mask = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
        mask_pil = self.numpy_to_pil(mask )[0].resize(image.size )
        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
        return inpainting_pipeline(
            prompt=prompt , image=image , mask_image=mask_pil , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , )
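The segmentation step at the top of `__call__` can be sketched in isolation; the CLIPSeg checkpoint name below is an assumption for illustration (any compatible checkpoint works), and no thresholding is applied, matching the soft mask the pipeline passes on.

import torch
from PIL import Image
from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained('CIDAS/clipseg-rd64-refined')  # assumed public checkpoint
model = CLIPSegForImageSegmentation.from_pretrained('CIDAS/clipseg-rd64-refined')
image = Image.new('RGB', (512, 512))  # stand-in for a real photo
inputs = processor(text=['a cat'], images=[image], padding='max_length', return_tensors='pt')
with torch.no_grad():
    logits = model(**inputs).logits
mask = torch.sigmoid(logits)  # soft mask in [0, 1], later resized to the input image size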
| 75 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler( metaclass=DummyObject ):
    _backends = ['torch', 'torchsde']
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['''torch''', '''torchsde'''] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['''torch''', '''torchsde'''] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['''torch''', '''torchsde'''] )
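The dummy-object pattern above keeps imports working while deferring the failure to instantiation time; a simplified stand-in (not the real diffusers helper) looks like this:

import importlib.util

def _requires_backends(obj, backends):
    # Simplified stand-in for diffusers' `requires_backends` helper.
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f'{type(obj).__name__} requires the missing backends: {missing}')

class PlaceholderScheduler:
    _backends = ['torch', 'torchsde']
    def __init__(self, *args, **kwargs):
        _requires_backends(self, self._backends)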
| 75 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path , bert_config_file , pytorch_dump_path ):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file )
    print(F"""Building PyTorch model from configuration: {config}""" )
    model = BertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCamelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 75 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''}
class CTRLConfig( PretrainedConfig ):
    model_type = 'ctrl'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__( self , vocab_size=246_534 , n_positions=256 , n_embd=1_280 , dff=8_192 , n_layer=48 , n_head=16 , resid_pdrop=0.1 , embd_pdrop=0.1 , layer_norm_epsilon=1e-6 , initializer_range=0.0_2 , use_cache=True , **kwargs , ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs )
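Because of `attribute_map`, the generic configuration names alias the CTRL-specific fields; a quick check (assuming a recent `transformers` install):

from transformers import CTRLConfig

config = CTRLConfig(n_embd=1_280, n_layer=48, n_head=16)
# `attribute_map` lets the generic names resolve to the CTRL-specific ones.
assert config.hidden_size == config.n_embd == 1_280
assert config.num_hidden_layers == config.n_layer == 48
assert config.num_attention_heads == config.n_head == 16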
| 75 | 1 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
def relu(vector: list[float] ):
    return np.maximum(0 , vector )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 75 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'pytorch',
'script': 'run_ddp.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'tensorflow',
'script': 'run_tf_dist.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
},
] )
class MultiNodeTest( unittest.TestCase ):
    def setUp( self ):
        if self.framework == "pytorch":
            subprocess.run(
                f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=True , )
        assert hasattr(self , '''env''' )
    def create_estimator( self , instance_count ):
        job_name = f"""{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}"""
        # distributed data settings
        distribution = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None
        # creates estimator
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=job_name , instance_count=instance_count , instance_type=self.instance_type , debugger_hook_config=False , hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=distribution , py_version='''py36''' , )
    def save_results_as_csv( self , job_name ):
        TrainingJobAnalytics(job_name ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(2,)] )
    def test_script( self , instance_count ):
        estimator = self.create_estimator(instance_count )
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 999_999 )
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
        assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(f"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
            json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , outfile )
| 75 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    def setUp( self ):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''l o''', '''lo w''', '''e r</w>''', '''''']
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) )
        with open(self.merges_file , '''w''' ) as fp:
            fp.write('''\n'''.join(merges ) )
    def get_input_output_texts( self , tokenizer ):
        return "lower newer", "lower newer"
    def test_full_tokenizer( self ):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
        text = '''lower'''
        bpe_tokens = ['''low''', '''er</w>''']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + ['''<unk>''']
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def test_padding( self , max_length=15 ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                # Simple input
                s = '''This is a simple input'''
                s2 = ['''This is a simple input 1''', '''This is a simple input 2''']
                p = ('''This is a simple input''', '''This is a pair''')
                p2 = [
                    ('''This is a simple input 1''', '''This is a simple input 2'''),
                    ('''This is a simple pair 1''', '''This is a simple pair 2'''),
                ]
                # Simple input tests
                self.assertRaises(ValueError , tokenizer_r.encode , s , max_length=max_length , padding='''max_length''' )
                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , s , max_length=max_length , padding='''max_length''' )
                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , s2 , max_length=max_length , padding='''max_length''' , )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , p , max_length=max_length , padding='''max_length''' )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , p , max_length=max_length , padding='''max_length''' )
                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , p2 , max_length=max_length , padding='''max_length''' , )
def lowercase_ ( self : Dict ):
'''simple docstring'''
pass
@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpaCy( OpenAIGPTTokenizationTest ):
    pass
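The toy vocabulary above encodes 'lower' as ['low', 'er</w>']; the same greedy merging can be replayed by hand with a few lines (a sketch of byte-pair merging, not the tokenizer's actual implementation):

# Replaying the expected merges for "lower" with the toy merge table above:
# 'l o' -> 'lo', 'lo w' -> 'low', 'e r</w>' -> 'er</w>', giving ['low', 'er</w>'].
merges = [('l', 'o'), ('lo', 'w'), ('e', 'r</w>')]
word = ['l', 'o', 'w', 'e', 'r</w>']  # characters, with '</w>' marking the word end
for a, b in merges:
    i, merged = 0, []
    while i < len(word):
        if i + 1 < len(word) and (word[i], word[i + 1]) == (a, b):
            merged.append(a + b)
            i += 2
        else:
            merged.append(word[i])
            i += 1
    word = merged
print(word)  # ['low', 'er</w>']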
| 75 |
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
UpperCamelCase__ = '''\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
'''
UpperCamelCase__ = '''\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
'''
UpperCamelCase__ = R'''
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting "1/2" to "\\frac{1}{2}")
Examples:
>>> metric = datasets.load_metric("competition_math")
>>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
>>> print(results)
{\'accuracy\': 1.0}
'''
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CompetitionMathMetric( datasets.Metric ):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ),
'''references''': datasets.Value('''string''' ),
} ) , homepage='''https://github.com/hendrycks/math''' , codebase_urls=['''https://github.com/hendrycks/math'''] , )
    def _compute( self , predictions , references ):
        n_correct = 0.0
        for i, j in zip(predictions , references ):
            n_correct += 1.0 if math_equivalence.is_equiv(i , j ) else 0.0
        accuracy = n_correct / len(predictions )
return {
"accuracy": accuracy,
}
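Usage matches the docstring above; note that `datasets.load_metric` is deprecated in newer `datasets` releases, where the `evaluate` library exposes the same metric.

import datasets

metric = datasets.load_metric('competition_math')
# '1/2' is canonicalized to '\\frac{1}{2}' before comparison.
print(metric.compute(references=['\\frac{1}{2}'], predictions=['1/2']))  # {'accuracy': 1.0}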
| 75 | 1 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = '''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'''
def get_user_input():
    compute_environment = _ask_options(
        '''In which compute environment are you running?''' , ['''This machine''', '''AWS (Amazon SageMaker)'''] , _convert_compute_environment , )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser(subparsers=None ):
    if subparsers is not None:
        parser = subparsers.add_parser('''config''' , description=description )
    else:
        parser = argparse.ArgumentParser('''Accelerate config command''' , description=description )
    parser.add_argument(
        '''--config_file''' , default=None , help=(
            '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
            '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
            '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
            '''with \'huggingface\'.'''
        ) , )
    if subparsers is not None:
        parser.set_defaults(func=config_command )
    return parser
def config_command(args ):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir ):
            os.makedirs(cache_dir )
        config_file = default_yaml_config_file
    if config_file.endswith('''.json''' ):
        config.to_json_file(config_file )
    else:
        config.to_yaml_file(config_file )
    print(F"""accelerate configuration saved at {config_file}""" )
def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args )
if __name__ == "__main__":
main()
| 75 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all BART models at https://huggingface.co/models?filter=bart
UpperCamelCase__ = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json''',
},
}
UpperCamelCase__ = {
'''facebook/bart-base''': 1_0_2_4,
'''facebook/bart-large''': 1_0_2_4,
'''facebook/bart-large-mnli''': 1_0_2_4,
'''facebook/bart-large-cnn''': 1_0_2_4,
'''facebook/bart-large-xsum''': 1_0_2_4,
'''yjernite/bart_eli5''': 1_0_2_4,
}
class BartTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = BartTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('''type''' ) )
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = '''post_processor'''
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state['''sep'''] = tuple(state['''sep'''] )
            if "cls" in state:
                state['''cls'''] = tuple(state['''cls'''] )
            changes_to_apply = False
            if state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
                state['''add_prefix_space'''] = add_prefix_space
                changes_to_apply = True
            if state.get('''trim_offsets''' , trim_offsets ) != trim_offsets:
                state['''trim_offsets'''] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop('''type''' ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
    @property
    def mask_token( self ):
        if self._mask_token is None:
            if self.verbose:
                logger.error('''Using mask_token, but it is not set yet.''' )
            return None
        return str(self._mask_token )
    @mask_token.setter
    def mask_token( self , value ):
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
    def _batch_encode_plus( self , *args , **kwargs ):
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                '''to use it with pretokenized inputs.''' )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus( self , *args , **kwargs ):
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                '''to use it with pretokenized inputs.''' )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
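The two sequence-building methods above produce the RoBERTa-style layout `<s> A </s>` and `<s> A </s></s> B </s>`, with all-zero token type ids; a quick check against the published checkpoint (assuming network access to `facebook/bart-base`):

from transformers import BartTokenizerFast

tok = BartTokenizerFast.from_pretrained('facebook/bart-base')
ids = tok.build_inputs_with_special_tokens([100], [200])
expected = [tok.bos_token_id, 100, tok.eos_token_id, tok.eos_token_id, 200, tok.eos_token_id]
assert ids == expected
assert tok.create_token_type_ids_from_sequences([100], [200]) == [0] * 6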
| 75 | 1 |
'''simple docstring'''
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
UpperCamelCase__ = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
UpperCamelCase__ = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
UpperCamelCase__ = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answers dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
\'matthews_correlation\': Matthew Correlation
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy(preds , labels ):
    return float((preds == labels).mean() )
def acc_and_f1(preds , labels , f1_avg="binary" ):
    acc = simple_accuracy(preds , labels )
    f1 = float(f1_score(y_true=labels , y_pred=preds , average=f1_avg ) )
    return {
        "accuracy": acc,
        "f1": f1,
    }
def evaluate_multirc(ids_preds , labels ):
    question_map = {}
    for id_pred, label in zip(ids_preds , labels ):
        question_id = F"""{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}"""
        pred = id_pred['''prediction''']
        if question_id in question_map:
            question_map[question_id].append((pred, label) )
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels )
        f1 = f1_score(y_true=question_labels , y_pred=question_preds , average='''macro''' )
        f1s.append(f1 )
        em = int(sum(pred == label for pred, label in preds_labels ) == len(preds_labels ) )
        ems.append(em )
    f1_m = float(sum(f1s ) / len(f1s ) )
    em = sum(ems ) / len(ems )
    f1_a = float(f1_score(y_true=labels , y_pred=[id_pred['''prediction'''] for id_pred in ids_preds] ) )
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue( datasets.Metric ):
    def _info( self ):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None , )
    def _get_feature_types( self ):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"prediction_text": datasets.Value('''string''' ),
},
"references": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"answers": datasets.Sequence(datasets.Value('''string''' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('''int64''' ),
"paragraph": datasets.Value('''int64''' ),
"question": datasets.Value('''int64''' ),
},
"prediction": datasets.Value('''int64''' ),
},
"references": datasets.Value('''int64''' ),
}
else:
return {
"predictions": datasets.Value('''int64''' ),
"references": datasets.Value('''int64''' ),
}
    def _compute( self , predictions , references ):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references , predictions )}
        elif self.config_name == "cb":
            return acc_and_f1(predictions , references , f1_avg='''macro''' )
        elif self.config_name == "record":
            dataset = [
                {
                    '''qas''': [
                        {'''id''': ref['''idx''']['''query'''], '''answers''': [{'''text''': ans} for ans in ref['''answers''']]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred['''idx''']['''query''']: pred['''prediction_text'''] for pred in predictions}
            return evaluate_record(dataset , predictions )[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions , references )
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions , references )}
        else:
            raise KeyError(
                '''You should supply a configuration name selected in '''
                '''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
| 75 |
'''simple docstring'''
import random
from typing import Any
def fisher_yates_shuffle(data: list ) -> list[Any]:
    for _ in range(len(data ) ):
        a = random.randint(0 , len(data ) - 1 )
        b = random.randint(0 , len(data ) - 1 )
        data[a], data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
UpperCamelCase__ = [0, 1, 2, 3, 4, 5, 6, 7]
UpperCamelCase__ = ['''python''', '''says''', '''hello''', '''!''']
print('''Fisher-Yates Shuffle:''')
print('''List''', integers, strings)
print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
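Note that the shuffle above swaps two independently chosen indices per step, which is simpler than, and not equivalent to, the classic Fisher-Yates algorithm; the unbiased textbook variant swaps each position with a uniformly chosen index at or below it (`random` is already imported above):

def fisher_yates_shuffle_classic(data: list) -> list:
    # Iterate from the last index down; swap position i with a uniform j in [0, i].
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)
        data[i], data[j] = data[j], data[i]
    return data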
| 75 | 1 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': (
'''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'''
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig( PretrainedConfig ):
    model_type = 'trajectory_transformer'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__( self , vocab_size=100 , action_weight=5 , reward_weight=1 , value_weight=1 , block_size=249 , action_dim=6 , observation_dim=17 , transition_dim=25 , n_layer=4 , n_head=4 , n_embd=128 , embd_pdrop=0.1 , attn_pdrop=0.1 , resid_pdrop=0.1 , learning_rate=0.0_0_0_6 , max_position_embeddings=512 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , kaiming_initializer_range=1 , use_cache=True , pad_token_id=1 , bos_token_id=50_256 , eos_token_id=50_256 , **kwargs , ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
| 75 |
'''simple docstring'''
import math
def sieve(n: int ) -> list[int]:
    prime = []
    start = 2
    end = int(math.sqrt(n ) )  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []
    while start <= end:
        if temp[start] is True:
            in_prime.append(start )
            for i in range(start * start , end + 1 , start ):
                temp[i] = False
        start += 1
    prime += in_prime
    low = end + 1
    high = min(2 * end , n )
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each ) * each
            if t < low:
                t += each
            for j in range(t , high + 1 , each ):
                temp[j - low] = False
        for j in range(len(temp ) ):
            if temp[j] is True:
                prime.append(j + low )
        low = high + 1
        high = min(high + end , n )
    return prime
print(sieve(1_0**6))
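A quick sanity check of the segmented sieve on small bounds (there are 25 primes below 100, and the `10**6` call above yields 78,498 primes):

assert sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
assert len(sieve(100)) == 25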
| 75 | 1 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
UpperCamelCase__ = logging.getLogger(__name__)
@dataclass
class Seq2SeqTrainingArguments( TrainingArguments ):
    label_smoothing: Optional[float] = field(
        default=0.0 , metadata={'help': 'The label smoothing epsilon to apply (if not zero).'} )
    sortish_sampler: bool = field(default=False , metadata={'help': 'Whether to SortishSamler or not.'} )
    predict_with_generate: bool = field(
        default=False , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
    adafactor: bool = field(default=False , metadata={'help': 'whether to use adafactor'} )
    encoder_layerdrop: Optional[float] = field(
        default=None , metadata={'help': 'Encoder layer dropout probability. Goes into model.config.'} )
    decoder_layerdrop: Optional[float] = field(
        default=None , metadata={'help': 'Decoder layer dropout probability. Goes into model.config.'} )
    dropout: Optional[float] = field(default=None , metadata={'help': 'Dropout probability. Goes into model.config.'} )
    attention_dropout: Optional[float] = field(
        default=None , metadata={'help': 'Attention dropout probability. Goes into model.config.'} )
    lr_scheduler: Optional[str] = field(
        default='linear' , metadata={'help': F"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"} , )
| 75 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusion2InpaintPipelineFastTests( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=True , )
        scheduler = PNDMScheduler(skip_prk_steps=True )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='''gelu''' , projection_dim=512 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert('''RGB''' ).resize((64, 64) )
        mask_image = Image.fromarray(np.uint8(image + 4 ) ).convert('''RGB''' ).resize((64, 64) )
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': init_image,
            '''mask_image''': mask_image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
        }
        return inputs
    def test_stable_diffusion_inpaint( self ):
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4_7_2_7, 0.5_7_3_5, 0.3_9_4_1, 0.5_4_4_6, 0.5_9_2_6, 0.4_3_9_4, 0.5_0_6_2, 0.4_6_5_4, 0.4_4_7_6] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_inference_batch_single_identical( self ):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class StableDiffusion2InpaintPipelineIntegrationTests( unittest.TestCase ):
    def tearDown( self ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_inpaint_pipeline( self ):
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/sd2-inpaint/init_image.png''' )
        mask_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
            '''/yellow_cat_sitting_on_a_park_bench.npy''' )
        model_id = '''stabilityai/stable-diffusion-2-inpainting'''
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id , safety_checker=None )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        prompt = '''Face of a yellow cat, high resolution, sitting on a park bench'''
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , generator=generator , output_type='''np''' , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 9e-3
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
UpperCAmelCase__ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
UpperCAmelCase__ : Union[str, Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench_fp16.npy''' )
UpperCAmelCase__ : Tuple = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCAmelCase__ : Any = StableDiffusionInpaintPipeline.from_pretrained(
_A , torch_dtype=torch.float16 , safety_checker=_A , )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
UpperCAmelCase__ : Tuple = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCAmelCase__ : Union[str, Any] = torch.manual_seed(0 )
UpperCAmelCase__ : Optional[Any] = pipe(
prompt=_A , image=_A , mask_image=_A , generator=_A , output_type='''np''' , )
UpperCAmelCase__ : Tuple = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def lowercase_ ( self : Any ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCAmelCase__ : Union[str, Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
UpperCAmelCase__ : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
UpperCAmelCase__ : Optional[Any] = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCAmelCase__ : str = PNDMScheduler.from_pretrained(_A , subfolder='''scheduler''' )
UpperCAmelCase__ : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(
_A , safety_checker=_A , scheduler=_A , torch_dtype=torch.float16 , )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
UpperCAmelCase__ : Optional[int] = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCAmelCase__ : Tuple = torch.manual_seed(0 )
UpperCAmelCase__ : Any = pipe(
prompt=_A , image=_A , mask_image=_A , generator=_A , num_inference_steps=2 , output_type='''np''' , )
UpperCAmelCase__ : int = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.6_5 * 10**9
| 75 | 1 |
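The inpainting tests above create their RNG differently depending on the device. As a standalone reference, the device-aware seeding pattern looks roughly like this (the function name is illustrative, not from the record):

```python
import torch

def make_generator(device: str, seed: int = 0) -> torch.Generator:
    # Device-bound generators are not reliably available on MPS, so the
    # tests fall back to seeding the global CPU generator there.
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)
```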
'''simple docstring'''
from __future__ import annotations
from collections import deque
class lowerCamelCase_ :
def __init__( self : List[Any] , _A : list[str] ):
'''simple docstring'''
UpperCAmelCase__ : list[dict] = []
self.adlist.append(
{'''value''': '''''', '''next_states''': [], '''fail_state''': 0, '''output''': []} )
for keyword in keywords:
self.add_keyword(_A )
self.set_fail_transitions()
def lowercase_ ( self : Union[str, Any] , _A : int , _A : str ):
'''simple docstring'''
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def lowercase_ ( self : Union[str, Any] , _A : str ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = 0
for character in keyword:
UpperCAmelCase__ : Tuple = self.find_next_state(_A , _A )
if next_state is None:
self.adlist.append(
{
'''value''': character,
'''next_states''': [],
'''fail_state''': 0,
'''output''': [],
} )
self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
UpperCAmelCase__ : int = len(self.adlist ) - 1
else:
UpperCAmelCase__ : str = next_state
self.adlist[current_state]["output"].append(_A )
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : deque = deque()
for node in self.adlist[0]["next_states"]:
q.append(_A )
UpperCAmelCase__ : int = 0
while q:
UpperCAmelCase__ : Tuple = q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(_A )
UpperCAmelCase__ : Dict = self.adlist[r]['''fail_state''']
while (
self.find_next_state(_A , self.adlist[child]['''value'''] ) is None
and state != 0
):
UpperCAmelCase__ : Union[str, Any] = self.adlist[state]['''fail_state''']
UpperCAmelCase__ : Tuple = self.find_next_state(
_A , self.adlist[child]['''value'''] )
if self.adlist[child]["fail_state"] is None:
UpperCAmelCase__ : Optional[Any] = 0
UpperCAmelCase__ : int = (
self.adlist[child]['''output''']
+ self.adlist[self.adlist[child]['''fail_state''']]['''output''']
)
def lowercase_ ( self : Dict , _A : str ):
'''simple docstring'''
UpperCAmelCase__ : dict = {} # returns a dict mapping each keyword to the list of its occurrences
UpperCAmelCase__ : Optional[Any] = 0
for i in range(len(_A ) ):
while (
self.find_next_state(_A , string[i] ) is None
and current_state != 0
):
UpperCAmelCase__ : int = self.adlist[current_state]['''fail_state''']
UpperCAmelCase__ : Dict = self.find_next_state(_A , string[i] )
if next_state is None:
UpperCAmelCase__ : Union[str, Any] = 0
else:
UpperCAmelCase__ : Any = next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
UpperCAmelCase__ : str = []
result[key].append(i - len(_A ) + 1 )
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 75 |
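The record above implements an Aho-Corasick keyword automaton. A brief usage sketch, assuming the dumped class and its final method were originally named `Automaton` and `search_in` (both appear renamed above):

```python
# Hypothetical original names for the renamed class/method above.
auto = Automaton(["he", "she", "his", "hers"])
print(auto.search_in("ahishers"))
# -> {'his': [1], 'she': [3], 'he': [4], 'hers': [4]}
```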
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCamelCase__ = '''platform'''
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , ) -> Tuple:
if attention_mask is None:
UpperCAmelCase__ : List[Any] = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
UpperCAmelCase__ : Union[str, Any] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
UpperCAmelCase__ : Optional[Any] = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCAmelCase__ : Optional[int] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
UpperCAmelCase__ : Any = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class lowerCamelCase_ :
def __init__( self : Optional[Any] , _A : Optional[Any] , _A : str=13 , _A : int=7 , _A : Any=True , _A : List[Any]=False , _A : Optional[int]=99 , _A : Optional[int]=16 , _A : int=2 , _A : Optional[int]=4 , _A : Optional[int]=4 , _A : int="gelu" , _A : List[str]=0.1 , _A : str=0.1 , _A : int=32 , _A : Optional[int]=2 , _A : int=1 , _A : Dict=0 , _A : Dict=0.0_2 , ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = parent
UpperCAmelCase__ : str = batch_size
UpperCAmelCase__ : Dict = seq_length
UpperCAmelCase__ : str = is_training
UpperCAmelCase__ : int = use_labels
UpperCAmelCase__ : Union[str, Any] = vocab_size
UpperCAmelCase__ : Union[str, Any] = hidden_size
UpperCAmelCase__ : int = num_hidden_layers
UpperCAmelCase__ : Any = num_attention_heads
UpperCAmelCase__ : List[str] = intermediate_size
UpperCAmelCase__ : str = hidden_act
UpperCAmelCase__ : str = hidden_dropout_prob
UpperCAmelCase__ : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase__ : Union[str, Any] = max_position_embeddings
UpperCAmelCase__ : int = eos_token_id
UpperCAmelCase__ : Optional[int] = pad_token_id
UpperCAmelCase__ : List[str] = bos_token_id
UpperCAmelCase__ : Union[str, Any] = initializer_range
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Any = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
UpperCAmelCase__ : str = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.int64 )) , -1 )
UpperCAmelCase__ : List[Any] = shift_tokens_right(_A , 1 , 2 )
UpperCAmelCase__ : List[Any] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_A , )
UpperCAmelCase__ : Tuple = prepare_blenderbot_inputs_dict(_A , _A , _A )
return config, inputs_dict
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.prepare_config_and_inputs()
return config, inputs_dict
def lowercase_ ( self : int , _A : List[Any] , _A : Optional[Any] , _A : int ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = 20
UpperCAmelCase__ : int = model_class_name(_A )
UpperCAmelCase__ : str = model.encode(inputs_dict['''input_ids'''] )
UpperCAmelCase__ , UpperCAmelCase__ : Dict = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
UpperCAmelCase__ : Tuple = model.init_cache(decoder_input_ids.shape[0] , _A , _A )
UpperCAmelCase__ : Optional[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
UpperCAmelCase__ : str = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
UpperCAmelCase__ : str = model.decode(
decoder_input_ids[:, :-1] , _A , decoder_attention_mask=_A , past_key_values=_A , decoder_position_ids=_A , )
UpperCAmelCase__ : Dict = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
UpperCAmelCase__ : Tuple = model.decode(
decoder_input_ids[:, -1:] , _A , decoder_attention_mask=_A , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_A , )
UpperCAmelCase__ : int = model.decode(_A , _A )
UpperCAmelCase__ : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def lowercase_ ( self : Tuple , _A : List[Any] , _A : Tuple , _A : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = 20
UpperCAmelCase__ : Optional[int] = model_class_name(_A )
UpperCAmelCase__ : Optional[int] = model.encode(inputs_dict['''input_ids'''] )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
UpperCAmelCase__ : Any = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
UpperCAmelCase__ : List[Any] = model.init_cache(decoder_input_ids.shape[0] , _A , _A )
UpperCAmelCase__ : Optional[Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
UpperCAmelCase__ : int = model.decode(
decoder_input_ids[:, :-1] , _A , decoder_attention_mask=_A , past_key_values=_A , decoder_position_ids=_A , )
UpperCAmelCase__ : List[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
UpperCAmelCase__ : Any = model.decode(
decoder_input_ids[:, -1:] , _A , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_A , decoder_position_ids=_A , )
UpperCAmelCase__ : List[str] = model.decode(_A , _A , decoder_attention_mask=_A )
UpperCAmelCase__ : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class lowerCamelCase_ ( unittest.TestCase ):
lowerCAmelCase__ = 9_9
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.int64 , )
UpperCAmelCase__ : int = input_ids.shape[0]
UpperCAmelCase__ : List[str] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self._get_config_and_data()
UpperCAmelCase__ : Any = FlaxBlenderbotForConditionalGeneration(_A )
UpperCAmelCase__ : Optional[int] = lm_model(input_ids=_A )
UpperCAmelCase__ : Dict = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , _A )
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
UpperCAmelCase__ : Union[str, Any] = FlaxBlenderbotForConditionalGeneration(_A )
UpperCAmelCase__ : str = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.int64 )
UpperCAmelCase__ : Any = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.int64 )
UpperCAmelCase__ : Tuple = lm_model(input_ids=_A , decoder_input_ids=_A )
UpperCAmelCase__ : int = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , _A )
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Any = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.int64 )
UpperCAmelCase__ : Union[str, Any] = shift_tokens_right(_A , 1 , 2 )
UpperCAmelCase__ : str = np.equal(_A , 1 ).astype(np.float32 ).sum()
UpperCAmelCase__ : Dict = np.equal(_A , 1 ).astype(np.float32 ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(_A , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class lowerCamelCase_ ( __a , unittest.TestCase , __a ):
lowerCAmelCase__ = True
lowerCAmelCase__ = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
lowerCAmelCase__ = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Dict = FlaxBlenderbotModelTester(self )
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_A , _A , _A )
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_A , _A , _A )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase__ : Dict = self._prepare_for_class(_A , _A )
UpperCAmelCase__ : str = model_class(_A )
@jax.jit
def encode_jitted(_A : Any , _A : Tuple=None , **_A : Optional[int] ):
return model.encode(input_ids=_A , attention_mask=_A )
with self.subTest('''JIT Enabled''' ):
UpperCAmelCase__ : Optional[Any] = encode_jitted(**_A ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
UpperCAmelCase__ : Tuple = encode_jitted(**_A ).to_tuple()
self.assertEqual(len(_A ) , len(_A ) )
for jitted_output, output in zip(_A , _A ):
self.assertEqual(jitted_output.shape , output.shape )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase__ : List[str] = model_class(_A )
UpperCAmelCase__ : Tuple = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
UpperCAmelCase__ : Tuple = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(_A : Optional[int] , _A : List[Any] , _A : int ):
return model.decode(
decoder_input_ids=_A , decoder_attention_mask=_A , encoder_outputs=_A , )
with self.subTest('''JIT Enabled''' ):
UpperCAmelCase__ : Any = decode_jitted(**_A ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
UpperCAmelCase__ : Optional[int] = decode_jitted(**_A ).to_tuple()
self.assertEqual(len(_A ) , len(_A ) )
for jitted_output, output in zip(_A , _A ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowercase_ ( self : List[str] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
UpperCAmelCase__ : Union[str, Any] = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
UpperCAmelCase__ : Tuple = np.ones((1, 1) ) * model.config.eos_token_id
UpperCAmelCase__ : Union[str, Any] = model(_A )
self.assertIsNotNone(_A )
@unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' )
@slow
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 15, '''max_length''': 25}
UpperCAmelCase__ : int = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True}
UpperCAmelCase__ : str = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=_A )
UpperCAmelCase__ : Optional[Any] = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' )
UpperCAmelCase__ : Optional[Any] = ['''Sam''']
UpperCAmelCase__ : Dict = tokenizer(_A , return_tensors='''jax''' )
UpperCAmelCase__ : List[str] = model.generate(**_A , **_A )
UpperCAmelCase__ : Dict = '''Sam is a great name. It means "sun" in Gaelic.'''
UpperCAmelCase__ : Any = tokenizer.batch_decode(_A , **_A )
assert generated_txt[0].strip() == tgt_text
| 75 | 1 |
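For reference, the `shift_tokens_right` helper exercised by the tests above behaves roughly like this sketch, which mirrors the transformers implementation but should be read as illustrative:

```python
import numpy as np

def shift_tokens_right(input_ids: np.ndarray, pad_token_id: int, decoder_start_token_id: int) -> np.ndarray:
    # Shift all tokens one position to the right and place the decoder
    # start token in column 0; -100 sentinels are replaced with padding.
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    return np.where(shifted == -100, pad_token_id, shifted)
```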
'''simple docstring'''
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class lowerCamelCase_ ( yaml.SafeLoader ):
def lowercase_ ( self : Optional[Any] , _A : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : int = [self.constructed_objects[key_node] for key_node, _ in node.value]
UpperCAmelCase__ : List[str] = [tuple(_A ) if isinstance(_A , _A ) else key for key in keys]
UpperCAmelCase__ : str = Counter(_A )
UpperCAmelCase__ : Dict = [key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(f"""Got duplicate yaml keys: {duplicate_keys}""" )
def lowercase_ ( self : Optional[Any] , _A : Tuple , _A : Tuple=False ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = super().construct_mapping(_A , deep=_A )
self._check_no_duplicates_on_constructed_node(_A )
return mapping
def a__ ( lowerCAmelCase__ ) -> Tuple[Optional[str], str]:
UpperCAmelCase__ : Tuple = list(readme_content.splitlines() )
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
UpperCAmelCase__ : Any = full_content[1:].index('''---''' ) + 1
UpperCAmelCase__ : Tuple = '''\n'''.join(full_content[1:sep_idx] )
return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
return None, "\n".join(lowerCAmelCase__ )
class lowerCamelCase_ ( __a ):
# class attributes
lowerCAmelCase__ = {'train_eval_index'} # train-eval-index in the YAML metadata
@classmethod
def lowercase_ ( cls : str , _A : Path ):
'''simple docstring'''
with open(_A , encoding='''utf-8''' ) as readme_file:
UpperCAmelCase__ , UpperCAmelCase__ : Dict = _split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(_A )
else:
return cls()
def lowercase_ ( self : Optional[int] , _A : Path ):
'''simple docstring'''
if path.exists():
with open(_A , encoding='''utf-8''' ) as readme_file:
UpperCAmelCase__ : Union[str, Any] = readme_file.read()
else:
UpperCAmelCase__ : Dict = None
UpperCAmelCase__ : str = self._to_readme(_A )
with open(_A , '''w''' , encoding='''utf-8''' ) as readme_file:
readme_file.write(_A )
def lowercase_ ( self : Dict , _A : Optional[str] = None ):
'''simple docstring'''
if readme_content is not None:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = _split_yaml_from_readme(_A )
UpperCAmelCase__ : Any = '''---\n''' + self.to_yaml_string() + '''---\n''' + content
else:
UpperCAmelCase__ : str = '''---\n''' + self.to_yaml_string() + '''---\n'''
return full_content
@classmethod
def lowercase_ ( cls : int , _A : str ):
'''simple docstring'''
UpperCAmelCase__ : int = yaml.load(_A , Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
UpperCAmelCase__ : List[Any] = {
(key.replace('''-''' , '''_''' ) if key.replace('''-''' , '''_''' ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**_A )
def lowercase_ ( self : Dict ):
'''simple docstring'''
return yaml.safe_dump(
{
(key.replace('''_''' , '''-''' ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
} , sort_keys=_A , allow_unicode=_A , encoding='''utf-8''' , ).decode('''utf-8''' )
UpperCamelCase__ = {
'''image-classification''': [],
'''translation''': [],
'''image-segmentation''': [],
'''fill-mask''': [],
'''automatic-speech-recognition''': [],
'''token-classification''': [],
'''sentence-similarity''': [],
'''audio-classification''': [],
'''question-answering''': [],
'''summarization''': [],
'''zero-shot-classification''': [],
'''table-to-text''': [],
'''feature-extraction''': [],
'''other''': [],
'''multiple-choice''': [],
'''text-classification''': [],
'''text-to-image''': [],
'''text2text-generation''': [],
'''zero-shot-image-classification''': [],
'''tabular-classification''': [],
'''tabular-regression''': [],
'''image-to-image''': [],
'''tabular-to-text''': [],
'''unconditional-image-generation''': [],
'''text-retrieval''': [],
'''text-to-speech''': [],
'''object-detection''': [],
'''audio-to-audio''': [],
'''text-generation''': [],
'''conversational''': [],
'''table-question-answering''': [],
'''visual-question-answering''': [],
'''image-to-text''': [],
'''reinforcement-learning''': [],
'''voice-activity-detection''': [],
'''time-series-forecasting''': [],
'''document-question-answering''': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
UpperCamelCase__ = ArgumentParser(usage='''Validate the yaml metadata block of a README.md file.''')
ap.add_argument('''readme_filepath''')
UpperCamelCase__ = ap.parse_args()
UpperCamelCase__ = Path(args.readme_filepath)
UpperCamelCase__ = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 75 |
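A worked example of the front-matter split performed in the record above, assuming the helper keeps its internally referenced name `_split_yaml_from_readme`:

```python
readme = "---\npretty_name: Demo\nlicense: mit\n---\n# My dataset\nSome description."
yaml_block, body = _split_yaml_from_readme(readme)
print(yaml_block)  # "pretty_name: Demo\nlicense: mit"
print(body)        # "# My dataset\nSome description."
```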
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class lowerCamelCase_ ( datasets.BeamBasedBuilder ):
def lowercase_ ( self : str ):
'''simple docstring'''
return datasets.DatasetInfo(
features=datasets.Features({'''content''': datasets.Value('''string''' )} ) , supervised_keys=_A , )
def lowercase_ ( self : int , _A : Optional[int] , _A : Optional[Any] ):
'''simple docstring'''
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_dummy_examples()} )]
def lowercase_ ( self : Union[str, Any] , _A : str , _A : Union[str, Any] ):
'''simple docstring'''
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(_A )
class lowerCamelCase_ ( datasets.BeamBasedBuilder ):
def lowercase_ ( self : Any ):
'''simple docstring'''
return datasets.DatasetInfo(
features=datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) , supervised_keys=_A , )
def lowercase_ ( self : Any , _A : List[str] , _A : Any ):
'''simple docstring'''
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_nested_examples()} )
]
def lowercase_ ( self : List[str] , _A : Optional[int] , _A : Tuple ):
'''simple docstring'''
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(_A )
def a__ ( ) -> Tuple:
return [(i, {"content": content}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )]
def a__ ( ) -> Optional[Any]:
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )]
class lowerCamelCase_ ( __a ):
@require_beam
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCAmelCase__ : Any = DummyBeamDataset(cache_dir=_A , beam_runner='''DirectRunner''' )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(_A , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train.arrow""" ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) )
UpperCAmelCase__ : Union[str, Any] = builder.as_dataset()
self.assertEqual(dset['''train'''].num_rows , _A )
self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , _A )
self.assertDictEqual(dset['''train'''][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset['''train'''][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(_A , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
del dset
@require_beam
def lowercase_ ( self : Any ):
'''simple docstring'''
import apache_beam as beam
UpperCAmelCase__ : List[str] = beam.io.parquetio.WriteToParquet
UpperCAmelCase__ : int = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCAmelCase__ : Optional[int] = DummyBeamDataset(cache_dir=_A , beam_runner='''DirectRunner''' )
with patch('''apache_beam.io.parquetio.WriteToParquet''' ) as write_parquet_mock:
UpperCAmelCase__ : Dict = partial(_A , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
_A , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
_A , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) )
UpperCAmelCase__ : Tuple = builder.as_dataset()
self.assertEqual(dset['''train'''].num_rows , _A )
self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , _A )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset['''train''']['''content'''] ) , sorted(['''foo''', '''bar''', '''foobar'''] ) )
self.assertTrue(
os.path.exists(os.path.join(_A , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
del dset
@require_beam
def lowercase_ ( self : int ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCAmelCase__ : Tuple = DummyBeamDataset(cache_dir=_A )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCAmelCase__ : int = NestedBeamDataset(cache_dir=_A , beam_runner='''DirectRunner''' )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(_A , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train.arrow""" ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) )
UpperCAmelCase__ : Optional[int] = builder.as_dataset()
self.assertEqual(dset['''train'''].num_rows , _A )
self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , _A )
self.assertDictEqual(dset['''train'''][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset['''train'''][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(_A , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
del dset
| 75 | 1 |
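The sharded-Parquet test above works by patching `WriteToParquet` so that every call is forced to produce two shards. In isolation the pattern is (assuming apache-beam is installed):

```python
from functools import partial
from unittest.mock import patch

import apache_beam as beam

# Keep a handle on the real transform, then patch the module attribute so
# the builder transparently calls it with num_shards pinned to 2.
original_write_parquet = beam.io.parquetio.WriteToParquet
with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
    write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
    ...  # run builder.download_and_prepare() here
```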
'''simple docstring'''
from collections import deque
from math import floor
from random import random
from time import time
class lowerCamelCase_ :
def __init__( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Dict = {}
def lowercase_ ( self : Optional[int] , _A : Optional[int] , _A : str , _A : List[str]=1 ):
'''simple docstring'''
if self.graph.get(_A ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
UpperCAmelCase__ : Tuple = [[w, v]]
if not self.graph.get(_A ):
UpperCAmelCase__ : List[Any] = []
def lowercase_ ( self : Dict ):
'''simple docstring'''
return list(self.graph )
def lowercase_ ( self : int , _A : List[str] , _A : str ):
'''simple docstring'''
if self.graph.get(_A ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(_A )
def lowercase_ ( self : Dict , _A : List[Any]=-2 , _A : Optional[int]=-1 ):
'''simple docstring'''
if s == d:
return []
UpperCAmelCase__ : Union[str, Any] = []
UpperCAmelCase__ : int = []
if s == -2:
UpperCAmelCase__ : Tuple = list(self.graph )[0]
stack.append(_A )
visited.append(_A )
UpperCAmelCase__ : List[Any] = s
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCAmelCase__ : Optional[int] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(_A )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
UpperCAmelCase__ : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(_A ) != 0:
UpperCAmelCase__ : Dict = stack[len(_A ) - 1]
else:
UpperCAmelCase__ : str = ss
# check if we have reached the starting point
if len(_A ) == 0:
return visited
def lowercase_ ( self : Tuple , _A : int=-1 ):
'''simple docstring'''
if c == -1:
UpperCAmelCase__ : Tuple = floor(random() * 10_000 ) + 10
for i in range(_A ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
UpperCAmelCase__ : Union[str, Any] = floor(random() * c ) + 1
if n != i:
self.add_pair(_A , _A , 1 )
def lowercase_ ( self : Tuple , _A : List[str]=-2 ):
'''simple docstring'''
UpperCAmelCase__ : int = deque()
UpperCAmelCase__ : Optional[int] = []
if s == -2:
UpperCAmelCase__ : Union[str, Any] = list(self.graph )[0]
d.append(_A )
visited.append(_A )
while d:
UpperCAmelCase__ : Dict = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def lowercase_ ( self : int , _A : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def lowercase_ ( self : Dict , _A : Union[str, Any] ):
'''simple docstring'''
return len(self.graph[u] )
def lowercase_ ( self : Dict , _A : Optional[Any]=-2 ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = []
UpperCAmelCase__ : List[Any] = []
if s == -2:
UpperCAmelCase__ : List[str] = list(self.graph )[0]
stack.append(_A )
visited.append(_A )
UpperCAmelCase__ : Any = s
UpperCAmelCase__ : Any = []
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCAmelCase__ : Dict = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCAmelCase__ : Optional[int] = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(_A ) != 0:
UpperCAmelCase__ : Union[str, Any] = stack[len(_A ) - 1]
else:
UpperCAmelCase__ : Union[str, Any] = ss
# check if we have reached the starting point
if len(_A ) == 0:
return sorted_nodes
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Any = []
UpperCAmelCase__ : List[str] = []
UpperCAmelCase__ : Union[str, Any] = list(self.graph )[0]
stack.append(_A )
visited.append(_A )
UpperCAmelCase__ : int = -2
UpperCAmelCase__ : List[str] = []
UpperCAmelCase__ : Optional[int] = s
UpperCAmelCase__ : Any = False
UpperCAmelCase__ : int = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCAmelCase__ : Tuple = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
UpperCAmelCase__ : Dict = len(_A ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCAmelCase__ : Optional[int] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCAmelCase__ : Union[str, Any] = True
if len(_A ) != 0:
UpperCAmelCase__ : List[str] = stack[len(_A ) - 1]
else:
UpperCAmelCase__ : Optional[Any] = False
indirect_parents.append(_A )
UpperCAmelCase__ : Tuple = s
UpperCAmelCase__ : Tuple = ss
# check if we have reached the starting point
if len(_A ) == 0:
return list(_A )
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = []
UpperCAmelCase__ : Tuple = []
UpperCAmelCase__ : Dict = list(self.graph )[0]
stack.append(_A )
visited.append(_A )
UpperCAmelCase__ : Union[str, Any] = -2
UpperCAmelCase__ : Optional[Any] = []
UpperCAmelCase__ : List[Any] = s
UpperCAmelCase__ : str = False
UpperCAmelCase__ : str = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCAmelCase__ : str = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
UpperCAmelCase__ : List[str] = len(_A ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCAmelCase__ : Optional[int] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCAmelCase__ : Union[str, Any] = True
if len(_A ) != 0:
UpperCAmelCase__ : int = stack[len(_A ) - 1]
else:
UpperCAmelCase__ : Dict = False
indirect_parents.append(_A )
UpperCAmelCase__ : int = s
UpperCAmelCase__ : Optional[int] = ss
# check if we have reached the starting point
if len(_A ) == 0:
return False
def lowercase_ ( self : Dict , _A : Any=-2 , _A : List[str]=-1 ):
'''simple docstring'''
UpperCAmelCase__ : Any = time()
self.dfs(_A , _A )
UpperCAmelCase__ : Union[str, Any] = time()
return end - begin
def lowercase_ ( self : Union[str, Any] , _A : Optional[int]=-2 ):
'''simple docstring'''
UpperCAmelCase__ : Dict = time()
self.bfs(_A )
UpperCAmelCase__ : Any = time()
return end - begin
class lowerCamelCase_ :
def __init__( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = {}
def lowercase_ ( self : List[Any] , _A : List[str] , _A : Optional[int] , _A : Any=1 ):
'''simple docstring'''
if self.graph.get(_A ):
# if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
UpperCAmelCase__ : Any = [[w, v]]
# add the other way
if self.graph.get(_A ):
# if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if v does not exist
UpperCAmelCase__ : List[str] = [[w, u]]
def lowercase_ ( self : Optional[Any] , _A : Optional[int] , _A : Dict ):
'''simple docstring'''
if self.graph.get(_A ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(_A )
# the other way round
if self.graph.get(_A ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(_A )
def lowercase_ ( self : Optional[Any] , _A : int=-2 , _A : Optional[int]=-1 ):
'''simple docstring'''
if s == d:
return []
UpperCAmelCase__ : int = []
UpperCAmelCase__ : Dict = []
if s == -2:
UpperCAmelCase__ : Optional[Any] = list(self.graph )[0]
stack.append(_A )
visited.append(_A )
UpperCAmelCase__ : Optional[Any] = s
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCAmelCase__ : Tuple = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(_A )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
UpperCAmelCase__ : List[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(_A ) != 0:
UpperCAmelCase__ : str = stack[len(_A ) - 1]
else:
UpperCAmelCase__ : Optional[Any] = ss
# check if we have reached the starting point
if len(_A ) == 0:
return visited
def lowercase_ ( self : str , _A : Tuple=-1 ):
'''simple docstring'''
if c == -1:
UpperCAmelCase__ : str = floor(random() * 10_000 ) + 10
for i in range(_A ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
UpperCAmelCase__ : List[str] = floor(random() * c ) + 1
if n != i:
self.add_pair(_A , _A , 1 )
def lowercase_ ( self : Tuple , _A : List[str]=-2 ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = deque()
UpperCAmelCase__ : Optional[int] = []
if s == -2:
UpperCAmelCase__ : int = list(self.graph )[0]
d.append(_A )
visited.append(_A )
while d:
UpperCAmelCase__ : Dict = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def lowercase_ ( self : Tuple , _A : int ):
'''simple docstring'''
return len(self.graph[u] )
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Dict = []
UpperCAmelCase__ : str = []
UpperCAmelCase__ : Any = list(self.graph )[0]
stack.append(_A )
visited.append(_A )
UpperCAmelCase__ : Union[str, Any] = -2
UpperCAmelCase__ : Union[str, Any] = []
UpperCAmelCase__ : List[Any] = s
UpperCAmelCase__ : Any = False
UpperCAmelCase__ : Any = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCAmelCase__ : int = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
UpperCAmelCase__ : Tuple = len(_A ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCAmelCase__ : Any = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCAmelCase__ : Dict = True
if len(_A ) != 0:
UpperCAmelCase__ : Optional[int] = stack[len(_A ) - 1]
else:
UpperCAmelCase__ : Optional[int] = False
indirect_parents.append(_A )
UpperCAmelCase__ : Optional[int] = s
UpperCAmelCase__ : Dict = ss
# check if we have reached the starting point
if len(_A ) == 0:
return list(_A )
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = []
UpperCAmelCase__ : str = []
UpperCAmelCase__ : Any = list(self.graph )[0]
stack.append(_A )
visited.append(_A )
UpperCAmelCase__ : Dict = -2
UpperCAmelCase__ : Optional[int] = []
UpperCAmelCase__ : Optional[Any] = s
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : Union[str, Any] = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCAmelCase__ : List[str] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
UpperCAmelCase__ : Union[str, Any] = len(_A ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCAmelCase__ : Any = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCAmelCase__ : List[str] = True
if len(_A ) != 0:
UpperCAmelCase__ : Any = stack[len(_A ) - 1]
else:
UpperCAmelCase__ : Optional[Any] = False
indirect_parents.append(_A )
UpperCAmelCase__ : List[str] = s
UpperCAmelCase__ : Optional[Any] = ss
# check if we have reached the starting point
if len(_A ) == 0:
return False
def lowercase_ ( self : int ):
'''simple docstring'''
return list(self.graph )
def lowercase_ ( self : List[Any] , _A : Any=-2 , _A : List[str]=-1 ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = time()
self.dfs(_A , _A )
UpperCAmelCase__ : Tuple = time()
return end - begin
def lowercase_ ( self : int , _A : str=-2 ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = time()
self.bfs(_A )
UpperCAmelCase__ : int = time()
return end - begin
| 75 |
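A quick usage sketch for the directed graph class above. The class name `Graph` is assumed; `add_pair`, `dfs`, and `bfs` are the method names the code itself references internally:

```python
g = Graph()          # assumed name for the renamed directed class above
g.add_pair(0, 1)
g.add_pair(1, 2)
g.add_pair(2, 0)
print(g.dfs(0, 2))   # depth-first path from 0 to 2 -> [0, 1, 2]
print(g.bfs(0))      # breadth-first order from 0   -> [0, 1, 2]
```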
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
UpperCamelCase__ = '''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'''
def a__ ( ) -> List[str]:
UpperCAmelCase__ : Optional[int] = _ask_options(
'''In which compute environment are you running?''' , ['''This machine''', '''AWS (Amazon SageMaker)'''] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
UpperCAmelCase__ : Any = get_sagemaker_input()
else:
UpperCAmelCase__ : List[str] = get_cluster_input()
return config
def a__ ( lowerCAmelCase__=None ) -> List[Any]:
if subparsers is not None:
UpperCAmelCase__ : Union[str, Any] = subparsers.add_parser('''config''' , description=lowerCAmelCase__ )
else:
UpperCAmelCase__ : Dict = argparse.ArgumentParser('''Accelerate config command''' , description=lowerCAmelCase__ )
parser.add_argument(
'''--config_file''' , default=lowerCAmelCase__ , help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) , )
if subparsers is not None:
parser.set_defaults(func=lowerCAmelCase__ )
return parser
def a__ ( lowerCAmelCase__ ) -> List[Any]:
UpperCAmelCase__ : List[Any] = get_user_input()
if args.config_file is not None:
UpperCAmelCase__ : Any = args.config_file
else:
if not os.path.isdir(lowerCAmelCase__ ):
os.makedirs(lowerCAmelCase__ )
UpperCAmelCase__ : int = default_yaml_config_file
if config_file.endswith('''.json''' ):
config.to_json_file(lowerCAmelCase__ )
else:
config.to_yaml_file(lowerCAmelCase__ )
print(F"""accelerate configuration saved at {config_file}""" )
def a__ ( ) -> str:
UpperCAmelCase__ : Optional[int] = config_command_parser()
UpperCAmelCase__ : Any = parser.parse_args()
config_command(lowerCAmelCase__ )
if __name__ == "__main__":
main()
| 75 | 1 |
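The tail of the record above resolves the output path and serializes by file extension. A minimal standalone sketch of that logic follows; the default path shown is an assumption, since the real command derives it from `HF_HOME`/`XDG_CACHE_HOME`:

```python
import os

def save_config(config, config_file=None):
    # Fall back to a cache-directory default when no explicit path is given.
    path = config_file or os.path.expanduser("~/.cache/huggingface/accelerate/default_config.yaml")
    os.makedirs(os.path.dirname(path), exist_ok=True)
    if path.endswith(".json"):
        config.to_json_file(path)
    else:
        config.to_yaml_file(path)
    print(f"accelerate configuration saved at {path}")
```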
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCamelCase_ ( metaclass=__a ):
lowerCAmelCase__ = ['torch', 'transformers', 'onnx']
def __init__( self : Dict , *_A : Tuple , **_A : List[Any] ):
'''simple docstring'''
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : int , *_A : Dict , **_A : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : Union[str, Any] , *_A : str , **_A : Dict ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowerCamelCase_ ( metaclass=__a ):
lowerCAmelCase__ = ['torch', 'transformers', 'onnx']
def __init__( self : Tuple , *_A : int , **_A : str ):
'''simple docstring'''
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : Optional[int] , *_A : List[str] , **_A : Tuple ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : Optional[int] , *_A : str , **_A : Dict ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowerCamelCase_ ( metaclass=__a ):
lowerCAmelCase__ = ['torch', 'transformers', 'onnx']
def __init__( self : Tuple , *_A : Any , **_A : Tuple ):
'''simple docstring'''
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : List[Any] , *_A : List[Any] , **_A : Dict ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : Any , *_A : Union[str, Any] , **_A : List[str] ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowerCamelCase_ ( metaclass=__a ):
lowerCAmelCase__ = ['torch', 'transformers', 'onnx']
def __init__( self : Optional[Any] , *_A : List[Any] , **_A : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : str , *_A : List[str] , **_A : Tuple ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : Dict , *_A : Optional[int] , **_A : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowerCamelCase_ ( metaclass=__a ):
lowerCAmelCase__ = ['torch', 'transformers', 'onnx']
def __init__( self : Optional[Any] , *_A : Tuple , **_A : Tuple ):
'''simple docstring'''
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : Dict , *_A : List[Any] , **_A : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : int , *_A : Tuple , **_A : List[Any] ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowerCamelCase_ ( metaclass=__a ):
lowerCAmelCase__ = ['torch', 'transformers', 'onnx']
def __init__( self : Optional[Any] , *_A : Optional[int] , **_A : int ):
'''simple docstring'''
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : Dict , *_A : Dict , **_A : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : Optional[Any] , *_A : List[Any] , **_A : int ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
| 75 |
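Every dummy class above funnels into `requires_backends`. A simplified stand-in shows the idea; the real helper lives in the library's utils and raises richer, per-backend install hints:

```python
import importlib.util

def requires_backends(obj, backends):
    # Simplified: verify each backend module is importable and raise a
    # readable ImportError listing whatever is missing.
    name = getattr(obj, "__name__", type(obj).__name__)
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{name} requires backends that are not installed: {missing}")
```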
'''simple docstring'''
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int:
# Construct model
if gpta_config_file == "":
UpperCAmelCase__ : Optional[Any] = GPTaConfig()
else:
UpperCAmelCase__ : Tuple = GPTaConfig.from_json_file(lowerCAmelCase__ )
UpperCAmelCase__ : Dict = GPTaModel(lowerCAmelCase__ )
# Load weights from the TensorFlow checkpoint
load_tf_weights_in_gpta(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save pytorch-model
UpperCAmelCase__ : Union[str, Any] = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
UpperCAmelCase__ : Optional[Any] = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
print(F"""Save PyTorch model to {pytorch_weights_dump_path}""" )
torch.save(model.state_dict() , lowerCAmelCase__ )
print(F"""Save configuration file to {pytorch_config_dump_path}""" )
with open(lowerCAmelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--gpt2_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--gpt2_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
'''This specifies the model architecture.'''
),
)
UpperCamelCase__ = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
| 75 | 1 |
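End to end, the conversion record above amounts to the following. The dump spells the identifiers `GPTa*`; the real transformers names are `GPT2Config`, `GPT2Model`, and `load_tf_weights_in_gpt2`, and the paths below are placeholders:

```python
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2

config = GPT2Config()  # or GPT2Config.from_json_file("config.json")
model = GPT2Model(config)
load_tf_weights_in_gpt2(model, config, "/path/to/tf_checkpoint")  # placeholder path
torch.save(model.state_dict(), "pytorch_model.bin")
config.to_json_file("config.json")
```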
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = 'dandelin/vilt-b32-finetuned-vqa'
lowerCAmelCase__ = (
'This is a tool that answers a question about an image. It takes an input named `image` which should be the '
'image containing the information, as well as a `question` which should be the question in English. It '
'returns a text that is the answer to the question.'
)
lowerCAmelCase__ = 'image_qa'
lowerCAmelCase__ = AutoProcessor
lowerCAmelCase__ = AutoModelForVisualQuestionAnswering
lowerCAmelCase__ = ['image', 'text']
lowerCAmelCase__ = ['text']
def __init__( self : Dict , *_A : int , **_A : Tuple ):
'''simple docstring'''
requires_backends(self , ['''vision'''] )
super().__init__(*_A , **_A )
def lowercase_ ( self : str , _A : "Image" , _A : str ):
'''simple docstring'''
return self.pre_processor(_A , _A , return_tensors='''pt''' )
def lowercase_ ( self : List[str] , _A : Optional[Any] ):
'''simple docstring'''
with torch.no_grad():
return self.model(**_A ).logits
def lowercase_ ( self : int , _A : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Any = outputs.argmax(-1 ).item()
return self.model.config.id2label[idx]
| 75 |
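A usage sketch for the visual question answering tool above. The class name `ImageQuestionAnsweringTool` and the keyword-style call are assumptions; tools of this kind are callable with their declared `image` and `text` inputs:

```python
from PIL import Image

tool = ImageQuestionAnsweringTool()  # assumed name for the renamed class above
answer = tool(image=Image.open("photo.jpg"), question="What is on the table?")  # placeholder image
print(answer)
```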
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCamelCase_ :
def __init__( self : Optional[int] , _A : Optional[Any] , _A : Tuple=2 , _A : Tuple=3 , _A : Optional[Any]=4 , _A : List[Any]=2 , _A : List[Any]=7 , _A : int=True , _A : Dict=True , _A : int=True , _A : Dict=True , _A : Tuple=99 , _A : Union[str, Any]=36 , _A : int=2 , _A : List[str]=4 , _A : int=37 , _A : List[Any]="gelu" , _A : str=0.1 , _A : str=0.1 , _A : Tuple=512 , _A : Dict=16 , _A : Tuple=2 , _A : Union[str, Any]=0.0_2 , _A : Any=6 , _A : Union[str, Any]=6 , _A : str=3 , _A : str=4 , _A : Tuple=None , _A : int=1_000 , ):
'''simple docstring'''
UpperCAmelCase__ : int = parent
UpperCAmelCase__ : Optional[int] = batch_size
UpperCAmelCase__ : str = num_channels
UpperCAmelCase__ : str = image_size
UpperCAmelCase__ : List[str] = patch_size
UpperCAmelCase__ : Any = is_training
UpperCAmelCase__ : List[str] = use_input_mask
UpperCAmelCase__ : Tuple = use_token_type_ids
UpperCAmelCase__ : str = use_labels
UpperCAmelCase__ : int = vocab_size
UpperCAmelCase__ : List[Any] = hidden_size
UpperCAmelCase__ : Optional[int] = num_hidden_layers
UpperCAmelCase__ : List[str] = num_attention_heads
UpperCAmelCase__ : Tuple = intermediate_size
UpperCAmelCase__ : Dict = hidden_act
UpperCAmelCase__ : int = hidden_dropout_prob
UpperCAmelCase__ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase__ : List[str] = max_position_embeddings
UpperCAmelCase__ : Tuple = type_vocab_size
UpperCAmelCase__ : Any = type_sequence_label_size
UpperCAmelCase__ : List[str] = initializer_range
UpperCAmelCase__ : List[str] = coordinate_size
UpperCAmelCase__ : Tuple = shape_size
UpperCAmelCase__ : Optional[int] = num_labels
UpperCAmelCase__ : Optional[Any] = num_choices
UpperCAmelCase__ : Union[str, Any] = scope
UpperCAmelCase__ : Optional[Any] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
UpperCAmelCase__ : str = text_seq_length
UpperCAmelCase__ : Tuple = (image_size // patch_size) ** 2 + 1
UpperCAmelCase__ : Tuple = self.text_seq_length + self.image_seq_length
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
UpperCAmelCase__ : int = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
UpperCAmelCase__ : str = bbox[i, j, 3]
UpperCAmelCase__ : Dict = bbox[i, j, 1]
UpperCAmelCase__ : str = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
UpperCAmelCase__ : Optional[int] = bbox[i, j, 2]
UpperCAmelCase__ : Any = bbox[i, j, 0]
UpperCAmelCase__ : List[Any] = tmp_coordinate
UpperCAmelCase__ : str = tf.constant(_A )
UpperCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : Any = None
if self.use_input_mask:
UpperCAmelCase__ : Any = random_attention_mask([self.batch_size, self.text_seq_length] )
UpperCAmelCase__ : Any = None
if self.use_token_type_ids:
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
UpperCAmelCase__ : Optional[int] = None
UpperCAmelCase__ : List[str] = None
if self.use_labels:
UpperCAmelCase__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
UpperCAmelCase__ : Optional[int] = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def lowercase_ ( self : Union[str, Any] , _A : int , _A : str , _A : Optional[int] , _A : Optional[int] , _A : List[str] , _A : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = TFLayoutLMvaModel(config=_A )
# text + image
UpperCAmelCase__ : Tuple = model(_A , pixel_values=_A , training=_A )
UpperCAmelCase__ : Tuple = model(
_A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , training=_A , )
UpperCAmelCase__ : Optional[Any] = model(_A , bbox=_A , pixel_values=_A , training=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
UpperCAmelCase__ : Any = model(_A , training=_A )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
UpperCAmelCase__ : str = model({'''pixel_values''': pixel_values} , training=_A )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def lowercase_ ( self : Union[str, Any] , _A : Optional[int] , _A : Optional[Any] , _A : Dict , _A : List[Any] , _A : List[Any] , _A : Any , _A : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.num_labels
UpperCAmelCase__ : int = TFLayoutLMvaForSequenceClassification(config=_A )
UpperCAmelCase__ : Union[str, Any] = model(
_A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , labels=_A , training=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ ( self : Dict , _A : List[Any] , _A : Any , _A : Dict , _A : str , _A : Optional[int] , _A : str , _A : str ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.num_labels
UpperCAmelCase__ : Union[str, Any] = TFLayoutLMvaForTokenClassification(config=_A )
UpperCAmelCase__ : Optional[int] = model(
_A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , labels=_A , training=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def lowercase_ ( self : Dict , _A : Dict , _A : List[str] , _A : Union[str, Any] , _A : int , _A : Tuple , _A : Dict , _A : str ):
'''simple docstring'''
UpperCAmelCase__ : str = 2
UpperCAmelCase__ : Dict = TFLayoutLMvaForQuestionAnswering(config=_A )
UpperCAmelCase__ : str = model(
_A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , training=_A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : int = self.prepare_config_and_inputs()
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : List[str] = config_and_inputs
UpperCAmelCase__ : List[Any] = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''pixel_values''': pixel_values,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_tf
class lowerCamelCase_ ( __a , __a , unittest.TestCase ):
lowerCAmelCase__ = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase__ = (
{'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def lowercase_ ( self : List[Any] , _A : Union[str, Any] , _A : str , _A : List[Any] , _A : Dict , _A : List[str] ):
'''simple docstring'''
return True
def lowercase_ ( self : Optional[Any] , _A : Tuple , _A : Any , _A : Dict=False ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = copy.deepcopy(_A )
if model_class in get_values(_A ):
UpperCAmelCase__ : Tuple = {
k: tf.tile(tf.expand_dims(_A , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(_A , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(_A ):
UpperCAmelCase__ : Dict = tf.ones(self.model_tester.batch_size , dtype=tf.int32 )
elif model_class in get_values(_A ):
UpperCAmelCase__ : Tuple = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
UpperCAmelCase__ : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
elif model_class in get_values(_A ):
UpperCAmelCase__ : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
elif model_class in get_values(_A ):
UpperCAmelCase__ : int = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.int32 )
return inputs_dict
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Any = TFLayoutLMvaModelTester(self )
UpperCAmelCase__ : Tuple = ConfigTester(self , config_class=_A , hidden_size=37 )
def lowercase_ ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Optional[Any] = model_class(_A )
if getattr(_A , '''hf_compute_loss''' , _A ):
# The number of elements in the loss should be the same as the number of elements in the label
UpperCAmelCase__ : Tuple = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
UpperCAmelCase__ : List[Any] = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=_A )[0]
]
UpperCAmelCase__ : Optional[Any] = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
UpperCAmelCase__ : Any = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
UpperCAmelCase__ : Tuple = prepared_for_class.pop('''input_ids''' )
UpperCAmelCase__ : List[Any] = model(_A , **_A )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
UpperCAmelCase__ : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
UpperCAmelCase__ : Tuple = prepared_for_class.pop('''input_ids''' )
if "labels" in prepared_for_class:
UpperCAmelCase__ : Optional[Any] = prepared_for_class['''labels'''].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
UpperCAmelCase__ : Any = -100
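# -100 is the conventional ignore_index for token-level losses, so the masked positions drop out of the loss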
UpperCAmelCase__ : Union[str, Any] = tf.convert_to_tensor(_A )
UpperCAmelCase__ : int = model(_A , **_A )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
UpperCAmelCase__ : Optional[int] = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
UpperCAmelCase__ : Dict = model(_A )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
UpperCAmelCase__ : Dict = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
# Get keys that were added with the _prepare_for_class function
UpperCAmelCase__ : Optional[int] = prepared_for_class.keys() - inputs_dict.keys()
UpperCAmelCase__ : int = inspect.signature(model.call ).parameters
UpperCAmelCase__ : Union[str, Any] = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
UpperCAmelCase__ : Dict = {0: '''input_ids'''}
for label_key in label_keys:
UpperCAmelCase__ : str = signature_names.index(_A )
UpperCAmelCase__ : List[Any] = label_key
UpperCAmelCase__ : Dict = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
UpperCAmelCase__ : Tuple = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
UpperCAmelCase__ : Any = prepared_for_class[value]
UpperCAmelCase__ : Tuple = tuple(_A )
# Send to model
UpperCAmelCase__ : Optional[Any] = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def lowercase_ ( self : int ):
'''simple docstring'''
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(_A , _A , _A , _A , _A , _A )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase__ : Union[str, Any] = type
self.model_tester.create_and_check_model(_A , _A , _A , _A , _A , _A )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
_A , _A , _A , _A , _A , _A , _A )
def lowercase_ ( self : Any ):
'''simple docstring'''
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
_A , _A , _A , _A , _A , _A , _A )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
_A , _A , _A , _A , _A , _A , _A )
@slow
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : List[str] = TFLayoutLMvaModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def a__ ( ) -> List[str]:
UpperCAmelCase__ : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
class lowerCamelCase_ ( unittest.TestCase ):
@cached_property
def lowercase_ ( self : Dict ):
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=_A ) if is_vision_available() else None
@slow
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : str = TFLayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' )
UpperCAmelCase__ : Dict = self.default_image_processor
UpperCAmelCase__ : Any = prepare_img()
UpperCAmelCase__ : int = image_processor(images=_A , return_tensors='''tf''' ).pixel_values
UpperCAmelCase__ : str = tf.constant([[1, 2]] )
UpperCAmelCase__ : Optional[Any] = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
UpperCAmelCase__ : int = model(input_ids=_A , bbox=_A , pixel_values=_A , training=_A )
# verify the logits
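# 199 = 2 text tokens + 196 image patches ((224 // 16) ** 2) + 1 patch CLS token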
UpperCAmelCase__ : Optional[int] = (1, 199, 768)
self.assertEqual(outputs.last_hidden_state.shape , _A )
UpperCAmelCase__ : Dict = tf.constant(
[[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _A , atol=1e-4 ) )
| 75 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'''nielsr/canine-s''': 2_0_4_8,
}
# Unicode defines 1,114,112 total “codepoints”
UpperCamelCase__ = 1_1_1_4_1_1_2
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
UpperCamelCase__ = 0
UpperCamelCase__ = 0Xe_000
UpperCamelCase__ = 0Xe_001
UpperCamelCase__ = 0Xe_002
UpperCamelCase__ = 0Xe_003
UpperCamelCase__ = 0Xe_004
# Maps special codepoints to human-readable names.
UpperCamelCase__ = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
UpperCamelCase__ = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Union[str, Any] , _A : List[Any]=chr(CLS ) , _A : Optional[Any]=chr(SEP ) , _A : Dict=chr(SEP ) , _A : Optional[int]=chr(CLS ) , _A : Optional[int]=chr(PAD ) , _A : int=chr(MASK ) , _A : Tuple=False , _A : str=2_048 , **_A : Union[str, Any] , ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else bos_token
UpperCAmelCase__ : Dict = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else eos_token
UpperCAmelCase__ : Optional[int] = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else sep_token
UpperCAmelCase__ : Optional[Any] = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else cls_token
UpperCAmelCase__ : List[Any] = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase__ : Any = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else mask_token
super().__init__(
bos_token=_A , eos_token=_A , sep_token=_A , cls_token=_A , pad_token=_A , mask_token=_A , add_prefix_space=_A , model_max_length=_A , **_A , )
# Creates a mapping for looking up the IDs of special symbols.
UpperCAmelCase__ : Dict[str, int] = {}
for codepoint, name in SPECIAL_CODEPOINTS.items():
UpperCAmelCase__ : int = codepoint
# Creates a mapping for looking up the string forms of special symbol IDs.
UpperCAmelCase__ : Dict[int, str] = {
codepoint: name for name, codepoint in self._special_codepoints.items()
}
UpperCAmelCase__ : Dict = UNICODE_VOCAB_SIZE
UpperCAmelCase__ : List[Any] = len(self._special_codepoints )
@property
def lowercase_ ( self : Dict ):
'''simple docstring'''
return self._unicode_vocab_size
def lowercase_ ( self : Tuple , _A : str ):
'''simple docstring'''
return list(_A )
def lowercase_ ( self : int , _A : str ):
'''simple docstring'''
try:
return ord(_A )
except TypeError:
raise ValueError(f"""invalid token: '{token}'""" )
def lowercase_ ( self : List[Any] , _A : int ):
'''simple docstring'''
try:
if index in SPECIAL_CODEPOINTS:
return SPECIAL_CODEPOINTS[index]
return chr(_A )
except TypeError:
raise ValueError(f"""invalid id: {index}""" )
def lowercase_ ( self : Any , _A : Optional[Any] ):
'''simple docstring'''
return "".join(_A )
def lowercase_ ( self : List[Any] , _A : List[int] , _A : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase__ : str = [self.sep_token_id]
UpperCAmelCase__ : Any = [self.cls_token_id]
UpperCAmelCase__ : Tuple = cls + token_ids_a + sep
if token_ids_a is not None:
result += token_ids_a + sep
return result
def lowercase_ ( self : Dict , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A )
UpperCAmelCase__ : Tuple = [1] + ([0] * len(_A )) + [1]
if token_ids_a is not None:
result += ([0] * len(_A )) + [1]
return result
def lowercase_ ( self : List[str] , _A : List[int] , _A : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = [self.sep_token_id]
UpperCAmelCase__ : int = [self.cls_token_id]
UpperCAmelCase__ : Union[str, Any] = len(cls + token_ids_a + sep ) * [0]
if token_ids_a is not None:
result += len(token_ids_a + sep ) * [1]
return result
def lowercase_ ( self : Optional[Any] , _A : str , _A : Optional[str] = None ):
'''simple docstring'''
return ()
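# Hedged demo (added): CANINE has no learned vocabulary -- token ids are Unicode
# codepoints, so encoding/decoding reduces to ord()/chr() plus the special
# codepoints defined at the top of this file.
if __name__ == "__main__":
text = '''hello'''
ids = [CLS] + [ord(c) for c in text] + [SEP]
assert "".join(chr(i) for i in ids[1:-1] ) == text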
| 75 |
'''simple docstring'''
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
UpperCamelCase__ = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: ''')))
print('''Googling.....''')
UpperCamelCase__ = F"""https://www.google.com/search?q={query}&num=100"""
UpperCamelCase__ = requests.get(
url,
headers={'''User-Agent''': str(UserAgent().random)},
)
try:
UpperCamelCase__ = (
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''yuRUbf'''})
.find('''a''')
.get('''href''')
)
except AttributeError:
UpperCamelCase__ = parse_qs(
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''kCrYT'''})
.find('''a''')
.get('''href''')
)['''url'''][0]
webbrowser.open(link)
| 75 | 1 |
'''simple docstring'''
import functools
from typing import Any
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> bool:
# Validation
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or len(lowerCAmelCase__ ) == 0:
raise ValueError('''the string should be not empty string''' )
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or not all(
isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and len(lowerCAmelCase__ ) > 0 for item in words ):
raise ValueError('''the words should be a list of non-empty strings''' )
# Build trie
UpperCAmelCase__ : dict[str, Any] = {}
UpperCAmelCase__ : Optional[Any] = '''WORD_KEEPER'''
for word in words:
UpperCAmelCase__ : Optional[Any] = trie
for c in word:
if c not in trie_node:
UpperCAmelCase__ : Optional[Any] = {}
UpperCAmelCase__ : Union[str, Any] = trie_node[c]
UpperCAmelCase__ : str = True
UpperCAmelCase__ : List[Any] = len(lowerCAmelCase__ )
# Dynamic programming method
@functools.cache
def is_breakable(lowerCAmelCase__ ) -> bool:
if index == len_string:
return True
UpperCAmelCase__ : int = trie
for i in range(lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase__ : str = trie_node.get(string[i] , lowerCAmelCase__ )
if trie_node is None:
return False
if trie_node.get(lowerCAmelCase__ , lowerCAmelCase__ ) and is_breakable(i + 1 ):
return True
return False
return is_breakable(0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
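# Hedged demo (added): `a__` is the obfuscated word-break entry point defined above;
# the sample strings are illustrative only.
print(a__('''applepenapple''', ['''apple''', '''pen''']))  # expected: True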
| 75 |
'''simple docstring'''
from pathlib import Path
import cv2
import numpy as np
from matplotlib import pyplot as plt
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> np.ndarray:
UpperCAmelCase__ : List[str] = cv2.getAffineTransform(lowerCAmelCase__ , lowerCAmelCase__ )
return cv2.warpAffine(lowerCAmelCase__ , lowerCAmelCase__ , (rows, cols) )
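# Note (added): cv2.getAffineTransform solves the unique 2x3 matrix M that maps the
# three source points onto the three destination points; cv2.warpAffine then applies
# [x', y'] = M @ [x, y, 1] to every pixel.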
if __name__ == "__main__":
# read original image
UpperCamelCase__ = cv2.imread(
str(Path(__file__).resolve().parent.parent / '''image_data''' / '''lena.jpg''')
)
# turn image in gray scale value
UpperCamelCase__ = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# get image shape
UpperCamelCase__ , UpperCamelCase__ = gray_img.shape
# set different points to rotate image
UpperCamelCase__ = np.array([[5_0, 5_0], [2_0_0, 5_0], [5_0, 2_0_0]], np.float32)
UpperCamelCase__ = np.array([[1_0, 1_0_0], [2_0_0, 5_0], [1_0_0, 2_5_0]], np.float32)
UpperCamelCase__ = np.array([[5_0, 5_0], [1_5_0, 5_0], [1_2_0, 2_0_0]], np.float32)
UpperCamelCase__ = np.array([[1_0, 1_0_0], [8_0, 5_0], [1_8_0, 2_5_0]], np.float32)
# add all rotated images in a list
UpperCamelCase__ = [
gray_img,
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
]
# plot different image rotations
UpperCamelCase__ = plt.figure(1)
UpperCamelCase__ = ['''Original''', '''Rotation 1''', '''Rotation 2''', '''Rotation 3''']
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, '''gray''')
plt.title(titles[i])
plt.axis('''off''')
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
| 75 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = 'naver-clova-ix/donut-base-finetuned-docvqa'
lowerCAmelCase__ = (
'This is a tool that answers a question about an document (pdf). It takes an input named `document` which '
'should be the document containing the information, as well as a `question` that is the question about the '
'document. It returns a text that contains the answer to the question.'
)
lowerCAmelCase__ = 'document_qa'
lowerCAmelCase__ = AutoProcessor
lowerCAmelCase__ = VisionEncoderDecoderModel
lowerCAmelCase__ = ['image', 'text']
lowerCAmelCase__ = ['text']
def __init__( self : int , *_A : Union[str, Any] , **_A : List[Any] ):
'''simple docstring'''
if not is_vision_available():
raise ValueError('''Pillow must be installed to use the DocumentQuestionAnsweringTool.''' )
super().__init__(*_A , **_A )
def lowercase_ ( self : Optional[Any] , _A : "Image" , _A : str ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
UpperCAmelCase__ : Tuple = task_prompt.replace('''{user_input}''' , _A )
UpperCAmelCase__ : Optional[Any] = self.pre_processor.tokenizer(
_A , add_special_tokens=_A , return_tensors='''pt''' ).input_ids
UpperCAmelCase__ : Dict = self.pre_processor(_A , return_tensors='''pt''' ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def lowercase_ ( self : Optional[Any] , _A : List[str] ):
'''simple docstring'''
return self.model.generate(
inputs['''pixel_values'''].to(self.device ) , decoder_input_ids=inputs['''decoder_input_ids'''].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=_A , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=_A , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=_A , ).sequences
def lowercase_ ( self : str , _A : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = self.pre_processor.batch_decode(_A )[0]
UpperCAmelCase__ : Dict = sequence.replace(self.pre_processor.tokenizer.eos_token , '''''' )
UpperCAmelCase__ : List[Any] = sequence.replace(self.pre_processor.tokenizer.pad_token , '''''' )
UpperCAmelCase__ : int = re.sub(R'''<.*?>''' , '''''' , _A , count=1 ).strip() # remove first task start token
UpperCAmelCase__ : Optional[Any] = self.pre_processor.token2json(_A )
return sequence["answer"]
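# Hedged usage sketch (added; the file name and question are illustrative assumptions):
#     tool = DocumentQuestionAnsweringTool()
#     tool(Image.open('''invoice.png'''), '''What is the total amount?''')
# encode builds the <s_docvqa> prompt plus pixel values, forward runs generate(), and
# decode strips special tokens and parses the answer via token2json.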
| 75 |
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
UpperCamelCase__ = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''feature request''',
'''new model''',
'''wip''',
]
def a__ ( ) -> List[str]:
UpperCAmelCase__ : int = Github(os.environ['''GITHUB_TOKEN'''] )
UpperCAmelCase__ : List[Any] = g.get_repo('''huggingface/transformers''' )
UpperCAmelCase__ : List[str] = repo.get_issues(state='''open''' )
for issue in open_issues:
UpperCAmelCase__ : List[Any] = sorted(list(issue.get_comments() ) , key=lambda i : i.created_at , reverse=True )
UpperCAmelCase__ : Tuple = comments[0] if len(lowerCAmelCase__ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='''closed''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
| 75 | 1 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCamelCase_ :
def __init__( self : Optional[int] , _A : Optional[Any] , _A : Tuple=2 , _A : Tuple=3 , _A : Optional[Any]=4 , _A : List[Any]=2 , _A : List[Any]=7 , _A : int=True , _A : Dict=True , _A : int=True , _A : Dict=True , _A : Tuple=99 , _A : Union[str, Any]=36 , _A : int=2 , _A : List[str]=4 , _A : int=37 , _A : List[Any]="gelu" , _A : str=0.1 , _A : str=0.1 , _A : Tuple=512 , _A : Dict=16 , _A : Tuple=2 , _A : Union[str, Any]=0.0_2 , _A : Any=6 , _A : Union[str, Any]=6 , _A : str=3 , _A : str=4 , _A : Tuple=None , _A : int=1_000 , ):
'''simple docstring'''
UpperCAmelCase__ : int = parent
UpperCAmelCase__ : Optional[int] = batch_size
UpperCAmelCase__ : str = num_channels
UpperCAmelCase__ : str = image_size
UpperCAmelCase__ : List[str] = patch_size
UpperCAmelCase__ : Any = is_training
UpperCAmelCase__ : List[str] = use_input_mask
UpperCAmelCase__ : Tuple = use_token_type_ids
UpperCAmelCase__ : str = use_labels
UpperCAmelCase__ : int = vocab_size
UpperCAmelCase__ : List[Any] = hidden_size
UpperCAmelCase__ : Optional[int] = num_hidden_layers
UpperCAmelCase__ : List[str] = num_attention_heads
UpperCAmelCase__ : Tuple = intermediate_size
UpperCAmelCase__ : Dict = hidden_act
UpperCAmelCase__ : int = hidden_dropout_prob
UpperCAmelCase__ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase__ : List[str] = max_position_embeddings
UpperCAmelCase__ : Tuple = type_vocab_size
UpperCAmelCase__ : Any = type_sequence_label_size
UpperCAmelCase__ : List[str] = initializer_range
UpperCAmelCase__ : List[str] = coordinate_size
UpperCAmelCase__ : Tuple = shape_size
UpperCAmelCase__ : Optional[int] = num_labels
UpperCAmelCase__ : Optional[Any] = num_choices
UpperCAmelCase__ : Union[str, Any] = scope
UpperCAmelCase__ : Optional[Any] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
UpperCAmelCase__ : str = text_seq_length
UpperCAmelCase__ : Tuple = (image_size // patch_size) ** 2 + 1
UpperCAmelCase__ : Tuple = self.text_seq_length + self.image_seq_length
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
UpperCAmelCase__ : int = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
UpperCAmelCase__ : str = bbox[i, j, 3]
UpperCAmelCase__ : Dict = bbox[i, j, 1]
UpperCAmelCase__ : str = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
UpperCAmelCase__ : Optional[int] = bbox[i, j, 2]
UpperCAmelCase__ : Any = bbox[i, j, 0]
UpperCAmelCase__ : List[Any] = tmp_coordinate
UpperCAmelCase__ : str = tf.constant(_A )
UpperCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : Any = None
if self.use_input_mask:
UpperCAmelCase__ : Any = random_attention_mask([self.batch_size, self.text_seq_length] )
UpperCAmelCase__ : Any = None
if self.use_token_type_ids:
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
UpperCAmelCase__ : Optional[int] = None
UpperCAmelCase__ : List[str] = None
if self.use_labels:
UpperCAmelCase__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
UpperCAmelCase__ : Optional[int] = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def lowercase_ ( self : Union[str, Any] , _A : int , _A : str , _A : Optional[int] , _A : Optional[int] , _A : List[str] , _A : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = TFLayoutLMvaModel(config=_A )
# text + image
UpperCAmelCase__ : Tuple = model(_A , pixel_values=_A , training=_A )
UpperCAmelCase__ : Tuple = model(
_A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , training=_A , )
UpperCAmelCase__ : Optional[Any] = model(_A , bbox=_A , pixel_values=_A , training=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
UpperCAmelCase__ : Any = model(_A , training=_A )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
UpperCAmelCase__ : str = model({'''pixel_values''': pixel_values} , training=_A )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def lowercase_ ( self : Union[str, Any] , _A : Optional[int] , _A : Optional[Any] , _A : Dict , _A : List[Any] , _A : List[Any] , _A : Any , _A : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.num_labels
UpperCAmelCase__ : int = TFLayoutLMvaForSequenceClassification(config=_A )
UpperCAmelCase__ : Union[str, Any] = model(
_A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , labels=_A , training=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ ( self : Dict , _A : List[Any] , _A : Any , _A : Dict , _A : str , _A : Optional[int] , _A : str , _A : str ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.num_labels
UpperCAmelCase__ : Union[str, Any] = TFLayoutLMvaForTokenClassification(config=_A )
UpperCAmelCase__ : Optional[int] = model(
_A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , labels=_A , training=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def lowercase_ ( self : Dict , _A : Dict , _A : List[str] , _A : Union[str, Any] , _A : int , _A : Tuple , _A : Dict , _A : str ):
'''simple docstring'''
UpperCAmelCase__ : str = 2
UpperCAmelCase__ : Dict = TFLayoutLMvaForQuestionAnswering(config=_A )
UpperCAmelCase__ : str = model(
_A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , training=_A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : int = self.prepare_config_and_inputs()
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : List[str] = config_and_inputs
UpperCAmelCase__ : List[Any] = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''pixel_values''': pixel_values,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_tf
class lowerCamelCase_ ( __a , __a , unittest.TestCase ):
lowerCAmelCase__ = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase__ = (
{'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def lowercase_ ( self : List[Any] , _A : Union[str, Any] , _A : str , _A : List[Any] , _A : Dict , _A : List[str] ):
'''simple docstring'''
return True
def lowercase_ ( self : Optional[Any] , _A : Tuple , _A : Any , _A : Dict=False ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = copy.deepcopy(_A )
if model_class in get_values(_A ):
UpperCAmelCase__ : Tuple = {
k: tf.tile(tf.expand_dims(_A , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(_A , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(_A ):
UpperCAmelCase__ : Dict = tf.ones(self.model_tester.batch_size , dtype=tf.int32 )
elif model_class in get_values(_A ):
UpperCAmelCase__ : Tuple = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
UpperCAmelCase__ : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
elif model_class in get_values(_A ):
UpperCAmelCase__ : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
elif model_class in get_values(_A ):
UpperCAmelCase__ : int = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.int32 )
return inputs_dict
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Any = TFLayoutLMvaModelTester(self )
UpperCAmelCase__ : Tuple = ConfigTester(self , config_class=_A , hidden_size=37 )
def lowercase_ ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Optional[Any] = model_class(_A )
if getattr(_A , '''hf_compute_loss''' , _A ):
# The number of elements in the loss should be the same as the number of elements in the label
UpperCAmelCase__ : Tuple = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
UpperCAmelCase__ : List[Any] = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=_A )[0]
]
UpperCAmelCase__ : Optional[Any] = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
UpperCAmelCase__ : Any = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
UpperCAmelCase__ : Tuple = prepared_for_class.pop('''input_ids''' )
UpperCAmelCase__ : List[Any] = model(_A , **_A )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
UpperCAmelCase__ : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
UpperCAmelCase__ : Tuple = prepared_for_class.pop('''input_ids''' )
if "labels" in prepared_for_class:
UpperCAmelCase__ : Optional[Any] = prepared_for_class['''labels'''].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
UpperCAmelCase__ : Any = -100
UpperCAmelCase__ : Union[str, Any] = tf.convert_to_tensor(_A )
UpperCAmelCase__ : int = model(_A , **_A )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
UpperCAmelCase__ : Optional[int] = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
UpperCAmelCase__ : Dict = model(_A )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
UpperCAmelCase__ : Dict = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
# Get keys that were added with the _prepare_for_class function
UpperCAmelCase__ : Optional[int] = prepared_for_class.keys() - inputs_dict.keys()
UpperCAmelCase__ : int = inspect.signature(model.call ).parameters
UpperCAmelCase__ : Union[str, Any] = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
UpperCAmelCase__ : Dict = {0: '''input_ids'''}
for label_key in label_keys:
UpperCAmelCase__ : str = signature_names.index(_A )
UpperCAmelCase__ : List[Any] = label_key
UpperCAmelCase__ : Dict = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
UpperCAmelCase__ : Tuple = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
UpperCAmelCase__ : Any = prepared_for_class[value]
UpperCAmelCase__ : Tuple = tuple(_A )
# Send to model
UpperCAmelCase__ : Optional[Any] = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def lowercase_ ( self : int ):
'''simple docstring'''
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(_A , _A , _A , _A , _A , _A )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase__ : Union[str, Any] = type
self.model_tester.create_and_check_model(_A , _A , _A , _A , _A , _A )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
_A , _A , _A , _A , _A , _A , _A )
def lowercase_ ( self : Any ):
'''simple docstring'''
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
_A , _A , _A , _A , _A , _A , _A )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
_A , _A , _A , _A , _A , _A , _A )
@slow
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : List[str] = TFLayoutLMvaModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def a__ ( ) -> List[str]:
UpperCAmelCase__ : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
class lowerCamelCase_ ( unittest.TestCase ):
@cached_property
def lowercase_ ( self : Dict ):
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=_A ) if is_vision_available() else None
@slow
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : str = TFLayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' )
UpperCAmelCase__ : Dict = self.default_image_processor
UpperCAmelCase__ : Any = prepare_img()
UpperCAmelCase__ : int = image_processor(images=_A , return_tensors='''tf''' ).pixel_values
UpperCAmelCase__ : str = tf.constant([[1, 2]] )
UpperCAmelCase__ : Optional[Any] = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
UpperCAmelCase__ : int = model(input_ids=_A , bbox=_A , pixel_values=_A , training=_A )
# verify the logits
UpperCAmelCase__ : Optional[int] = (1, 199, 768)
self.assertEqual(outputs.last_hidden_state.shape , _A )
UpperCAmelCase__ : Dict = tf.constant(
[[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _A , atol=1e-4 ) )
| 75 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCamelCase_ ( __a ):
def __init__( self : Dict , _A : List[str] , _A : int ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=_A , scheduler=_A )
@torch.no_grad()
def __call__( self : List[Any] , _A : int = 1 , _A : int = 100 , _A : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _A : Optional[float] = None , _A : bool = True , ):
'''simple docstring'''
if audio_length_in_s is None:
UpperCAmelCase__ : List[str] = self.unet.config.sample_size / self.unet.config.sample_rate
UpperCAmelCase__ : Union[str, Any] = audio_length_in_s * self.unet.config.sample_rate
UpperCAmelCase__ : List[Any] = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
f"""{audio_length_in_s} is too small. Make sure it's bigger or equal to"""
f""" {3 * down_scale_factor / self.unet.config.sample_rate}.""" )
UpperCAmelCase__ : List[Any] = int(_A )
if sample_size % down_scale_factor != 0:
UpperCAmelCase__ : int = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"""
f""" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"""
''' process.''' )
UpperCAmelCase__ : Dict = int(_A )
UpperCAmelCase__ : Optional[Any] = next(iter(self.unet.parameters() ) ).dtype
UpperCAmelCase__ : int = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(_A , _A ) and len(_A ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(_A )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
UpperCAmelCase__ : Optional[int] = randn_tensor(_A , generator=_A , device=self.device , dtype=_A )
# set step values
self.scheduler.set_timesteps(_A , device=audio.device )
UpperCAmelCase__ : List[str] = self.scheduler.timesteps.to(_A )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
UpperCAmelCase__ : Optional[int] = self.unet(_A , _A ).sample
# 2. compute previous image: x_t -> t_t-1
UpperCAmelCase__ : List[Any] = self.scheduler.step(_A , _A , _A ).prev_sample
UpperCAmelCase__ : Any = audio.clamp(-1 , 1 ).float().cpu().numpy()
UpperCAmelCase__ : Any = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=_A )
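# Hedged usage sketch (added; the checkpoint id is an assumption): this pipeline runs the
# standard unconditional denoising loop (predict noise, then scheduler.step), so typical
# inference looks like:
#     pipe = DiffusionPipeline.from_pretrained('''harmonai/maestro-150k''')
#     audio = pipe(audio_length_in_s=4.0, num_inference_steps=100).audios[0]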
| 75 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all BART models at https://huggingface.co/models?filter=bart
UpperCamelCase__ = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json''',
},
}
UpperCamelCase__ = {
'''facebook/bart-base''': 1_0_2_4,
'''facebook/bart-large''': 1_0_2_4,
'''facebook/bart-large-mnli''': 1_0_2_4,
'''facebook/bart-large-cnn''': 1_0_2_4,
'''facebook/bart-large-xsum''': 1_0_2_4,
'''yjernite/bart_eli5''': 1_0_2_4,
}
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ['input_ids', 'attention_mask']
lowerCAmelCase__ = BartTokenizer
def __init__( self : Tuple , _A : List[str]=None , _A : Optional[Any]=None , _A : Union[str, Any]=None , _A : Tuple="replace" , _A : Optional[Any]="<s>" , _A : int="</s>" , _A : Optional[Any]="</s>" , _A : List[str]="<s>" , _A : Optional[int]="<unk>" , _A : Optional[int]="<pad>" , _A : str="<mask>" , _A : Dict=False , _A : int=True , **_A : Optional[Any] , ):
'''simple docstring'''
super().__init__(
_A , _A , tokenizer_file=_A , errors=_A , bos_token=_A , eos_token=_A , sep_token=_A , cls_token=_A , unk_token=_A , pad_token=_A , mask_token=_A , add_prefix_space=_A , trim_offsets=_A , **_A , )
UpperCAmelCase__ : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , _A ) != add_prefix_space:
UpperCAmelCase__ : str = getattr(_A , pre_tok_state.pop('''type''' ) )
UpperCAmelCase__ : Any = add_prefix_space
UpperCAmelCase__ : str = pre_tok_class(**_A )
UpperCAmelCase__ : Dict = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
UpperCAmelCase__ : Optional[Any] = '''post_processor'''
UpperCAmelCase__ : List[Any] = getattr(self.backend_tokenizer , _A , _A )
if tokenizer_component_instance:
UpperCAmelCase__ : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
UpperCAmelCase__ : Union[str, Any] = tuple(state['''sep'''] )
if "cls" in state:
UpperCAmelCase__ : Union[str, Any] = tuple(state['''cls'''] )
UpperCAmelCase__ : Dict = False
if state.get('''add_prefix_space''' , _A ) != add_prefix_space:
UpperCAmelCase__ : Union[str, Any] = add_prefix_space
UpperCAmelCase__ : Dict = True
if state.get('''trim_offsets''' , _A ) != trim_offsets:
UpperCAmelCase__ : List[Any] = trim_offsets
UpperCAmelCase__ : List[Any] = True
if changes_to_apply:
UpperCAmelCase__ : Dict = getattr(_A , state.pop('''type''' ) )
UpperCAmelCase__ : Union[str, Any] = component_class(**_A )
setattr(self.backend_tokenizer , _A , _A )
@property
def mask_token( self : Dict ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def mask_token( self : Dict , _A : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else value
UpperCAmelCase__ : str = value
def lowercase_ ( self : Optional[int] , *_A : List[str] , **_A : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Any = kwargs.get('''is_split_into_words''' , _A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*_A , **_A )
def lowercase_ ( self : Optional[Any] , *_A : Union[str, Any] , **_A : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = kwargs.get('''is_split_into_words''' , _A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*_A , **_A )
def lowercase_ ( self : Optional[int] , _A : str , _A : Optional[str] = None ):
'''simple docstring'''
UpperCAmelCase__ : str = self._tokenizer.model.save(_A , name=_A )
return tuple(_A )
def lowercase_ ( self : Tuple , _A : Union[str, Any] , _A : Optional[int]=None ):
'''simple docstring'''
UpperCAmelCase__ : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowercase_ ( self : int , _A : List[int] , _A : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = [self.sep_token_id]
UpperCAmelCase__ : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
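# Note (added): BART does not use token type ids, so the mask built above is all zeros
# even when a second sequence is supplied.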
| 75 |
'''simple docstring'''
from math import factorial
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> float:
if successes > trials:
raise ValueError('''successes must be lower or equal to trials''' )
if trials < 0 or successes < 0:
raise ValueError('''the function is defined for non-negative integers''' )
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
raise ValueError('''the function is defined for non-negative integers''' )
if not 0 < prob < 1:
raise ValueError('''prob has to be within the range (0, 1)''' )
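# pmf of the binomial distribution: C(trials, successes) * prob**successes * (1 - prob)**(trials - successes)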
UpperCAmelCase__ : Any = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
UpperCAmelCase__ : Any = float(factorial(lowerCAmelCase__ ) )
coefficient /= factorial(lowerCAmelCase__ ) * factorial(trials - successes )
return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print('''Probability of 2 successes out of 4 trials''')
print('''with probability of 0.75 is:''', end=''' ''')
print(binomial_distribution(2, 4, 0.75))
| 75 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase__ = {
'''configuration_efficientnet''': [
'''EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientNetConfig''',
'''EfficientNetOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ['''EfficientNetImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'''EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientNetForImageClassification''',
'''EfficientNetModel''',
'''EfficientNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 75 |
'''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCamelCase__ = logging.get_logger(__name__)
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = ['pixel_values']
def __init__( self : List[str] , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : bool = True , _A : Dict[str, int] = None , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , _A : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **_A : int , ):
'''simple docstring'''
super().__init__(**_A )
UpperCAmelCase__ : Dict = size if size is not None else {'''shortest_edge''': 224}
UpperCAmelCase__ : int = get_size_dict(_A , default_to_square=_A )
UpperCAmelCase__ : Union[str, Any] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
UpperCAmelCase__ : List[str] = get_size_dict(_A , param_name='''crop_size''' )
UpperCAmelCase__ : str = do_resize
UpperCAmelCase__ : List[Any] = size
UpperCAmelCase__ : int = resample
UpperCAmelCase__ : int = do_center_crop
UpperCAmelCase__ : List[str] = crop_size
UpperCAmelCase__ : Union[str, Any] = do_rescale
UpperCAmelCase__ : Optional[int] = rescale_factor
UpperCAmelCase__ : List[Any] = do_normalize
UpperCAmelCase__ : Dict = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
UpperCAmelCase__ : Dict = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def lowercase_ ( self : str , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = get_size_dict(_A , default_to_square=_A )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
UpperCAmelCase__ : Tuple = int((256 / 224) * size['''shortest_edge'''] )
UpperCAmelCase__ : Tuple = get_resize_output_image_size(_A , size=_A , default_to_square=_A )
UpperCAmelCase__ : Dict = {'''height''': output_size[0], '''width''': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" )
return resize(
_A , size=(size_dict['''height'''], size_dict['''width''']) , resample=_A , data_format=_A , **_A )
def lowercase_ ( self : Optional[Any] , _A : np.ndarray , _A : Dict[str, int] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Optional[int] , ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(_A , size=(size['''height'''], size['''width''']) , data_format=_A , **_A )
def lowercase_ ( self : List[str] , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Dict , ):
'''simple docstring'''
return rescale(_A , scale=_A , data_format=_A , **_A )
def lowercase_ ( self : Dict , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Optional[int] , ):
'''simple docstring'''
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def lowercase_ ( self : Optional[Any] , _A : ImageInput , _A : Optional[bool] = None , _A : Optional[Dict[str, int]] = None , _A : PILImageResampling = None , _A : Optional[bool] = None , _A : Optional[Dict[str, int]] = None , _A : Optional[bool] = None , _A : Optional[float] = None , _A : Optional[bool] = None , _A : Optional[Union[float, Iterable[float]]] = None , _A : Optional[Union[float, Iterable[float]]] = None , _A : Optional[TensorType] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : str , ):
'''simple docstring'''
UpperCAmelCase__ : str = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase__ : Optional[int] = resample if resample is not None else self.resample
UpperCAmelCase__ : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase__ : Tuple = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase__ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase__ : List[str] = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase__ : Tuple = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase__ : List[str] = image_std if image_std is not None else self.image_std
UpperCAmelCase__ : Tuple = size if size is not None else self.size
UpperCAmelCase__ : int = get_size_dict(_A , default_to_square=_A )
UpperCAmelCase__ : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase__ : int = get_size_dict(_A , param_name='''crop_size''' )
UpperCAmelCase__ : Union[str, Any] = make_list_of_images(_A )
if not valid_images(_A ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
UpperCAmelCase__ : int = [to_numpy_array(_A ) for image in images]
if do_resize:
UpperCAmelCase__ : str = [self.resize(_A , _A , _A ) for image in images]
if do_center_crop:
UpperCAmelCase__ : Tuple = [self.center_crop(_A , _A ) for image in images]
if do_rescale:
UpperCAmelCase__ : Optional[int] = [self.rescale(_A , _A ) for image in images]
if do_normalize:
UpperCAmelCase__ : Any = [self.normalize(_A , _A , _A ) for image in images]
UpperCAmelCase__ : Tuple = [to_channel_dimension_format(_A , _A ) for image in images]
UpperCAmelCase__ : Dict = {'''pixel_values''': images}
return BatchFeature(data=_A , tensor_type=_A )
| 75 | 1 |
'''simple docstring'''
# Algorithm for the pigeonhole sorting
def a__ ( lowerCAmelCase__ ) -> Union[str, Any]:
UpperCAmelCase__ : str = min(lowerCAmelCase__ ) # min() finds the minimum value
UpperCAmelCase__ : Dict = max(lowerCAmelCase__ ) # max() finds the maximum value
UpperCAmelCase__ : Tuple = max_val - min_val + 1 # size is difference of max and min values plus one
# list of pigeonholes of size equal to the variable size
UpperCAmelCase__ : List[Any] = [0] * size
# Populate the pigeonholes.
for x in a:
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ), "integers only please"
holes[x - min_val] += 1
# Putting the elements back into the array in sorted order.
UpperCAmelCase__ : int = 0
for count in range(lowerCAmelCase__ ):
while holes[count] > 0:
holes[count] -= 1
UpperCAmelCase__ : str = count + min_val
i += 1
def a__ ( ) -> Dict:
UpperCAmelCase__ : List[Any] = [8, 3, 2, 7, 4, 6, 8]
pigeonhole_sort(lowerCAmelCase__ )
print('''Sorted order is:''' , ''' '''.join(map(str , lowerCAmelCase__ ) ) )  # elements are ints, so convert before joining
if __name__ == "__main__":
main()
| 75 |
'''simple docstring'''
import math
def a__ ( ) -> None:
UpperCAmelCase__ : List[str] = input('''Enter message: ''' )
UpperCAmelCase__ : Any = int(input(F"""Enter key [2-{len(lowerCAmelCase__ ) - 1}]: """ ) )
UpperCAmelCase__ : List[str] = input('''Encryption/Decryption [e/d]: ''' )
if mode.lower().startswith('''e''' ):
UpperCAmelCase__ : Dict = encrypt_message(lowerCAmelCase__ , lowerCAmelCase__ )
elif mode.lower().startswith('''d''' ):
UpperCAmelCase__ : Optional[int] = decrypt_message(lowerCAmelCase__ , lowerCAmelCase__ )
# Append pipe symbol (vertical bar) to identify spaces at the end.
print(F"""Output:\n{text + "|"}""" )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
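# Columnar transposition: column col collects message[col], message[col + key], message[col + 2*key], ...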
UpperCAmelCase__ : Optional[int] = [''''''] * key
for col in range(lowerCAmelCase__ ):
UpperCAmelCase__ : Tuple = col
while pointer < len(lowerCAmelCase__ ):
cipher_text[col] += message[pointer]
pointer += key
return "".join(lowerCAmelCase__ )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
UpperCAmelCase__ : int = math.ceil(len(lowerCAmelCase__ ) / key )
UpperCAmelCase__ : Any = key
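# Number of grid cells that stay empty because the message doesn't fill the last row completely.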
UpperCAmelCase__ : Optional[int] = (num_cols * num_rows) - len(lowerCAmelCase__ )
UpperCAmelCase__ : List[Any] = [''''''] * num_cols
UpperCAmelCase__ : List[str] = 0
UpperCAmelCase__ : List[Any] = 0
for symbol in message:
plain_text[col] += symbol
col += 1
if (
(col == num_cols)
or (col == num_cols - 1)
and (row >= num_rows - num_shaded_boxes)
):
UpperCAmelCase__ : Optional[int] = 0
row += 1
return "".join(lowerCAmelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 75 | 1 |
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
UpperCamelCase__ = logging.get_logger('''transformers.models.speecht5''')
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]:
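# Weight norm is re-applied first so the model exposes weight_g / weight_v parameters matching the original checkpoint keys; it is removed again after loading.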
hf_model.apply_weight_norm()
UpperCAmelCase__ : Tuple = checkpoint['''input_conv.weight_g''']
UpperCAmelCase__ : Any = checkpoint['''input_conv.weight_v''']
UpperCAmelCase__ : Union[str, Any] = checkpoint['''input_conv.bias''']
for i in range(len(config.upsample_rates ) ):
UpperCAmelCase__ : int = checkpoint[F"""upsamples.{i}.1.weight_g"""]
UpperCAmelCase__ : str = checkpoint[F"""upsamples.{i}.1.weight_v"""]
UpperCAmelCase__ : List[Any] = checkpoint[F"""upsamples.{i}.1.bias"""]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
UpperCAmelCase__ : int = checkpoint[F"""blocks.{i}.convs1.{j}.1.weight_g"""]
UpperCAmelCase__ : List[Any] = checkpoint[F"""blocks.{i}.convs1.{j}.1.weight_v"""]
UpperCAmelCase__ : Any = checkpoint[F"""blocks.{i}.convs1.{j}.1.bias"""]
UpperCAmelCase__ : Tuple = checkpoint[F"""blocks.{i}.convs2.{j}.1.weight_g"""]
UpperCAmelCase__ : Tuple = checkpoint[F"""blocks.{i}.convs2.{j}.1.weight_v"""]
UpperCAmelCase__ : List[Any] = checkpoint[F"""blocks.{i}.convs2.{j}.1.bias"""]
UpperCAmelCase__ : List[Any] = checkpoint['''output_conv.1.weight_g''']
UpperCAmelCase__ : Union[str, Any] = checkpoint['''output_conv.1.weight_v''']
UpperCAmelCase__ : List[Any] = checkpoint['''output_conv.1.bias''']
hf_model.remove_weight_norm()
@torch.no_grad()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , ) -> List[str]:
if config_path is not None:
UpperCAmelCase__ : Tuple = SpeechTaHifiGanConfig.from_pretrained(lowerCAmelCase__ )
else:
UpperCAmelCase__ : Union[str, Any] = SpeechTaHifiGanConfig()
UpperCAmelCase__ : Optional[int] = SpeechTaHifiGan(lowerCAmelCase__ )
UpperCAmelCase__ : Any = torch.load(lowerCAmelCase__ )
load_weights(orig_checkpoint['''model''']['''generator'''] , lowerCAmelCase__ , lowerCAmelCase__ )
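# stats.npy holds two rows: row 0 is the mean and row 1 the scale used to normalize input spectrograms; both are converted to float tensors below.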
UpperCAmelCase__ : Optional[int] = np.load(lowerCAmelCase__ )
UpperCAmelCase__ : str = stats[0].reshape(-1 )
UpperCAmelCase__ : Optional[int] = stats[1].reshape(-1 )
UpperCAmelCase__ : Tuple = torch.from_numpy(lowerCAmelCase__ ).float()
UpperCAmelCase__ : Union[str, Any] = torch.from_numpy(lowerCAmelCase__ ).float()
model.save_pretrained(lowerCAmelCase__ )
if repo_id:
print('''Pushing to the hub...''' )
model.push_to_hub(lowerCAmelCase__ )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--stats_path''', required=True, default=None, type=str, help='''Path to stats.npy file''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
UpperCamelCase__ = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 75 |
'''simple docstring'''
class lowerCamelCase_ :
def __init__( self : str , _A : Union[str, Any] , _A : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = name
UpperCAmelCase__ : Union[str, Any] = val
def __str__( self : Tuple ):
'''simple docstring'''
return f"""{self.__class__.__name__}({self.name}, {self.val})"""
def __lt__( self : Union[str, Any] , _A : Dict ):
'''simple docstring'''
return self.val < other.val
class lowerCamelCase_ :
def __init__( self : int , _A : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = {}
UpperCAmelCase__ : int = {}
UpperCAmelCase__ : Any = self.build_heap(_A )
def __getitem__( self : Any , _A : Any ):
'''simple docstring'''
return self.get_value(_A )
def lowercase_ ( self : Any , _A : List[Any] ):
'''simple docstring'''
return (idx - 1) // 2
def lowercase_ ( self : Union[str, Any] , _A : Optional[int] ):
'''simple docstring'''
return idx * 2 + 1
def lowercase_ ( self : Tuple , _A : List[Any] ):
'''simple docstring'''
return idx * 2 + 2
def lowercase_ ( self : List[str] , _A : Tuple ):
'''simple docstring'''
return self.heap_dict[key]
def lowercase_ ( self : str , _A : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Any = len(_A ) - 1
UpperCAmelCase__ : Tuple = self.get_parent_idx(_A )
for idx, i in enumerate(_A ):
UpperCAmelCase__ : Dict = idx
UpperCAmelCase__ : Optional[Any] = i.val
for i in range(_A , -1 , -1 ):
self.sift_down(_A , _A )
return array
def lowercase_ ( self : Optional[Any] , _A : str , _A : List[Any] ):
'''simple docstring'''
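# Repeatedly swap the node at idx with its smaller child until the min-heap property holds.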
while True:
UpperCAmelCase__ : Any = self.get_left_child_idx(_A ) # noqa: E741
UpperCAmelCase__ : Optional[Any] = self.get_right_child_idx(_A )
UpperCAmelCase__ : Tuple = idx
if l < len(_A ) and array[l] < array[idx]:
UpperCAmelCase__ : int = l
if r < len(_A ) and array[r] < array[smallest]:
UpperCAmelCase__ : Dict = r
if smallest != idx:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = array[smallest], array[idx]
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = (
self.idx_of_element[array[smallest]],
self.idx_of_element[array[idx]],
)
UpperCAmelCase__ : str = smallest
else:
break
def lowercase_ ( self : List[str] , _A : int ):
'''simple docstring'''
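# Bubble the node at idx up while it is smaller than its parent.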
UpperCAmelCase__ : str = self.get_parent_idx(_A )
while p >= 0 and self.heap[p] > self.heap[idx]:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.heap[idx], self.heap[p]
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = (
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
UpperCAmelCase__ : Union[str, Any] = p
UpperCAmelCase__ : List[Any] = self.get_parent_idx(_A )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
return self.heap[0]
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Any = self.heap[-1], self.heap[0]
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = (
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
UpperCAmelCase__ : int = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0 , self.heap )
return x
def lowercase_ ( self : int , _A : Union[str, Any] ):
'''simple docstring'''
self.heap.append(_A )
UpperCAmelCase__ : Union[str, Any] = len(self.heap ) - 1
UpperCAmelCase__ : Optional[Any] = node.val
self.sift_up(len(self.heap ) - 1 )
def lowercase_ ( self : str ):
'''simple docstring'''
return len(self.heap ) == 0
def lowercase_ ( self : int , _A : Optional[Any] , _A : str ):
'''simple docstring'''
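# Decreasing a key can only violate the heap property towards the root, so a single sift_up restores it.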
assert (
self.heap[self.idx_of_element[node]].val > new_value
), "newValue must be less that current value"
UpperCAmelCase__ : Optional[Any] = new_value
UpperCAmelCase__ : List[str] = new_value
self.sift_up(self.idx_of_element[node] )
UpperCamelCase__ = Node('''R''', -1)
UpperCamelCase__ = Node('''B''', 6)
UpperCamelCase__ = Node('''A''', 3)
UpperCamelCase__ = Node('''X''', 1)
UpperCamelCase__ = Node('''E''', 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
UpperCamelCase__ = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('''Min Heap - before decrease key''')
for i in my_min_heap.heap:
print(i)
print('''Min Heap - After decrease key of node [B -> -17]''')
my_min_heap.decrease_key(b, -1_7)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 75 | 1 |
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase_ :
def __init__( self : List[str] , _A : Optional[Any] , _A : str=13 , _A : List[Any]=32 , _A : Tuple=3 , _A : Tuple=4 , _A : List[str]=[10, 20, 30, 40] , _A : Tuple=[2, 2, 3, 2] , _A : List[str]=True , _A : List[str]=True , _A : Optional[int]=37 , _A : Union[str, Any]="gelu" , _A : int=10 , _A : List[str]=0.0_2 , _A : Tuple=["stage2", "stage3", "stage4"] , _A : Dict=3 , _A : Dict=None , ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = parent
UpperCAmelCase__ : Dict = batch_size
UpperCAmelCase__ : int = image_size
UpperCAmelCase__ : int = num_channels
UpperCAmelCase__ : int = num_stages
UpperCAmelCase__ : List[Any] = hidden_sizes
UpperCAmelCase__ : Dict = depths
UpperCAmelCase__ : Optional[int] = is_training
UpperCAmelCase__ : Tuple = use_labels
UpperCAmelCase__ : Optional[Any] = intermediate_size
UpperCAmelCase__ : Optional[int] = hidden_act
UpperCAmelCase__ : Optional[int] = type_sequence_label_size
UpperCAmelCase__ : Optional[int] = initializer_range
UpperCAmelCase__ : Any = out_features
UpperCAmelCase__ : Optional[Any] = num_labels
UpperCAmelCase__ : Any = scope
UpperCAmelCase__ : List[str] = num_stages
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : Any = None
if self.use_labels:
UpperCAmelCase__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : List[Any] = self.get_config()
return config, pixel_values, labels
def lowercase_ ( self : int ):
'''simple docstring'''
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def lowercase_ ( self : str ):
'''simple docstring'''
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=_A , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=_A , loss_ignore_index=255 , num_labels=self.num_labels , )
def lowercase_ ( self : Any , _A : int , _A : List[str] , _A : int ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = UperNetForSemanticSegmentation(config=_A )
model.to(_A )
model.eval()
UpperCAmelCase__ : Union[str, Any] = model(_A )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = config_and_inputs
UpperCAmelCase__ : List[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase_ ( __a , __a , unittest.TestCase ):
lowerCAmelCase__ = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
lowerCAmelCase__ = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {}
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = UperNetModelTester(self )
UpperCAmelCase__ : Dict = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 )
def lowercase_ ( self : Dict ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase_ ( self : Dict ):
'''simple docstring'''
return
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Union[str, Any] = model_class(_A )
UpperCAmelCase__ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : Any = [*signature.parameters.keys()]
UpperCAmelCase__ : str = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _A )
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_A )
@unittest.skip(reason='''UperNet does not use inputs_embeds''' )
def lowercase_ ( self : int ):
'''simple docstring'''
pass
@unittest.skip(reason='''UperNet does not support input and output embeddings''' )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def lowercase_ ( self : Dict ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
pass
def lowercase_ ( self : Tuple ):
'''simple docstring'''
def check_hidden_states_output(_A : Any , _A : List[Any] , _A : Dict ):
UpperCAmelCase__ : str = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
UpperCAmelCase__ : Optional[Any] = model(**self._prepare_for_class(_A , _A ) )
UpperCAmelCase__ : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase__ : List[str] = self.model_tester.num_stages
self.assertEqual(len(_A ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
UpperCAmelCase__ , UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Union[str, Any] = True
check_hidden_states_output(_A , _A , _A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ : Optional[Any] = True
check_hidden_states_output(_A , _A , _A )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : Optional[Any] = _config_zero_init(_A )
UpperCAmelCase__ : Dict = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
UpperCAmelCase__ : Optional[Any] = model_class(config=_A )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip(reason='''UperNet does not have tied weights''' )
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
pass
@slow
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Any = UperNetForSemanticSegmentation.from_pretrained(_A )
self.assertIsNotNone(_A )
def a__ ( ) -> Tuple:
UpperCAmelCase__ : Optional[int] = hf_hub_download(
repo_id='''hf-internal-testing/fixtures_ade20k''' , repo_type='''dataset''' , filename='''ADE_val_00000001.jpg''' )
UpperCAmelCase__ : Optional[Any] = Image.open(lowerCAmelCase__ ).convert('''RGB''' )
return image
@require_torch
@require_vision
@slow
class lowerCamelCase_ ( unittest.TestCase ):
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Dict = AutoImageProcessor.from_pretrained('''openmmlab/upernet-swin-tiny''' )
UpperCAmelCase__ : Any = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-swin-tiny''' ).to(_A )
UpperCAmelCase__ : Union[str, Any] = prepare_img()
UpperCAmelCase__ : Union[str, Any] = processor(images=_A , return_tensors='''pt''' ).to(_A )
with torch.no_grad():
UpperCAmelCase__ : List[Any] = model(**_A )
UpperCAmelCase__ : Union[str, Any] = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , _A )
UpperCAmelCase__ : Any = torch.tensor(
[[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]] ).to(_A )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _A , atol=1e-4 ) )
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = AutoImageProcessor.from_pretrained('''openmmlab/upernet-convnext-tiny''' )
UpperCAmelCase__ : Any = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-convnext-tiny''' ).to(_A )
UpperCAmelCase__ : str = prepare_img()
UpperCAmelCase__ : Dict = processor(images=_A , return_tensors='''pt''' ).to(_A )
with torch.no_grad():
UpperCAmelCase__ : List[str] = model(**_A )
UpperCAmelCase__ : Tuple = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , _A )
UpperCAmelCase__ : List[Any] = torch.tensor(
[[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]] ).to(_A )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _A , atol=1e-4 ) )
| 75 |
'''simple docstring'''
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent that you run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
UpperCamelCase__ = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
UpperCamelCase__ = direct_transformers_import(PATH_TO_TRANSFORMERS)
UpperCamelCase__ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
UpperCamelCase__ = re.compile(R'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''')
UpperCamelCase__ = {
'''DecisionTransformerConfig''',
'''EncoderDecoderConfig''',
'''MusicgenConfig''',
'''RagConfig''',
'''SpeechEncoderDecoderConfig''',
'''TimmBackboneConfig''',
'''VisionEncoderDecoderConfig''',
'''VisionTextDualEncoderConfig''',
'''LlamaConfig''',
}
def a__ ( lowerCAmelCase__ ) -> List[str]:
UpperCAmelCase__ : str = None
# source code of `config_class`
UpperCAmelCase__ : str = inspect.getsource(lowerCAmelCase__ )
UpperCAmelCase__ : List[Any] = _re_checkpoint.findall(lowerCAmelCase__ )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith('''/''' ):
UpperCAmelCase__ : List[str] = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
UpperCAmelCase__ : Union[str, Any] = F"""https://huggingface.co/{ckpt_name}"""
if ckpt_link == ckpt_link_from_name:
UpperCAmelCase__ : Any = ckpt_name
break
return checkpoint
def a__ ( ) -> Dict:
UpperCAmelCase__ : Optional[Any] = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
UpperCAmelCase__ : Any = get_checkpoint_from_config_class(lowerCAmelCase__ )
UpperCAmelCase__ : Optional[int] = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > 0:
UpperCAmelCase__ : List[str] = '''\n'''.join(sorted(lowerCAmelCase__ ) )
raise ValueError(F"""The following configurations don't contain any valid checkpoint:\n{message}""" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 75 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
UpperCamelCase__ = {
'''configuration_gpt_neo''': ['''GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoConfig''', '''GPTNeoOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'''GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoForCausalLM''',
'''GPTNeoForQuestionAnswering''',
'''GPTNeoForSequenceClassification''',
'''GPTNeoForTokenClassification''',
'''GPTNeoModel''',
'''GPTNeoPreTrainedModel''',
'''load_tf_weights_in_gpt_neo''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'''FlaxGPTNeoForCausalLM''',
'''FlaxGPTNeoModel''',
'''FlaxGPTNeoPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 75 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCamelCase_ ( metaclass=__a ):
lowerCAmelCase__ = ['torch', 'torchsde']
def __init__( self : Tuple , *_A : Any , **_A : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ['''torch''', '''torchsde'''] )
@classmethod
def lowercase_ ( cls : List[Any] , *_A : Tuple , **_A : Tuple ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''torchsde'''] )
@classmethod
def lowercase_ ( cls : List[str] , *_A : Optional[int] , **_A : Any ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''torchsde'''] )
| 75 | 1 |
'''simple docstring'''
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]:
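# Permutation step: pick the input bits at the 1-indexed positions listed in the table.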
UpperCAmelCase__ : int = ''''''
for i in table:
res += inp[i - 1]
return res
def a__ ( lowerCAmelCase__ ) -> str:
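# Circular left shift of the bit-string by one position.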
return data[1:] + data[0]
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
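# Bitwise XOR of two equal-length bit-strings, character by character.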
UpperCAmelCase__ : str = ''''''
for i in range(len(lowerCAmelCase__ ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> Any:
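# S-box lookup: the outer bits (first and last) select the row, the middle two bits select the column.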
UpperCAmelCase__ : Tuple = int('''0b''' + data[0] + data[-1] , 2 )
UpperCAmelCase__ : Tuple = int('''0b''' + data[1:3] , 2 )
return bin(s[row][col] )[2:]
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
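# One Feistel round of simplified DES: expand/permute the right half, XOR with the round key, run both halves through the S-boxes, permute, and XOR the result into the left half.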
UpperCAmelCase__ : Any = message[:4]
UpperCAmelCase__ : Dict = message[4:]
UpperCAmelCase__ : int = apply_table(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase__ : Optional[Any] = xor(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase__ : int = apply_sbox(lowerCAmelCase__ , temp[:4] ) # noqa: E741
UpperCAmelCase__ : Any = apply_sbox(lowerCAmelCase__ , temp[4:] )
UpperCAmelCase__ : int = '''0''' * (2 - len(lowerCAmelCase__ )) + l # noqa: E741
UpperCAmelCase__ : Tuple = '''0''' * (2 - len(lowerCAmelCase__ )) + r
UpperCAmelCase__ : Tuple = apply_table(l + r , lowerCAmelCase__ )
UpperCAmelCase__ : str = xor(lowerCAmelCase__ , lowerCAmelCase__ )
return temp + right
if __name__ == "__main__":
UpperCamelCase__ = input('''Enter 10 bit key: ''')
UpperCamelCase__ = input('''Enter 8 bit message: ''')
UpperCamelCase__ = [6, 3, 7, 4, 8, 5, 1_0, 9]
UpperCamelCase__ = [3, 5, 2, 7, 4, 1_0, 1, 9, 8, 6]
UpperCamelCase__ = [2, 4, 3, 1]
UpperCamelCase__ = [2, 6, 3, 1, 4, 8, 5, 7]
UpperCamelCase__ = [4, 1, 3, 5, 7, 2, 8, 6]
UpperCamelCase__ = [4, 1, 2, 3, 2, 3, 4, 1]
UpperCamelCase__ = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
UpperCamelCase__ = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
UpperCamelCase__ = apply_table(key, paa_table)
UpperCamelCase__ = temp[:5]
UpperCamelCase__ = temp[5:]
UpperCamelCase__ = left_shift(left)
UpperCamelCase__ = left_shift(right)
UpperCamelCase__ = apply_table(left + right, pa_table)
UpperCamelCase__ = left_shift(left)
UpperCamelCase__ = left_shift(right)
UpperCamelCase__ = left_shift(left)
UpperCamelCase__ = left_shift(right)
UpperCamelCase__ = apply_table(left + right, pa_table)
# encryption
UpperCamelCase__ = apply_table(message, IP)
UpperCamelCase__ = function(expansion, sa, sa, keya, temp)
UpperCamelCase__ = temp[4:] + temp[:4]
UpperCamelCase__ = function(expansion, sa, sa, keya, temp)
UpperCamelCase__ = apply_table(temp, IP_inv)
print('''Cipher text is:''', CT)
# decryption
UpperCamelCase__ = apply_table(CT, IP)
UpperCamelCase__ = function(expansion, sa, sa, keya, temp)
UpperCamelCase__ = temp[4:] + temp[:4]
UpperCamelCase__ = function(expansion, sa, sa, keya, temp)
UpperCamelCase__ = apply_table(temp, IP_inv)
print('''Plain text after decypting is:''', PT)
| 75 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''}
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = 'ctrl'
lowerCAmelCase__ = ['past_key_values']
lowerCAmelCase__ = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : List[Any] , _A : Dict=246_534 , _A : Optional[Any]=256 , _A : Dict=1_280 , _A : List[str]=8_192 , _A : Tuple=48 , _A : Optional[Any]=16 , _A : List[Any]=0.1 , _A : List[Any]=0.1 , _A : List[str]=1e-6 , _A : Optional[int]=0.0_2 , _A : Tuple=True , **_A : Optional[Any] , ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = vocab_size
UpperCAmelCase__ : Any = n_positions
UpperCAmelCase__ : Optional[Any] = n_embd
UpperCAmelCase__ : List[str] = n_layer
UpperCAmelCase__ : Any = n_head
UpperCAmelCase__ : int = dff
UpperCAmelCase__ : str = resid_pdrop
UpperCAmelCase__ : Tuple = embd_pdrop
UpperCAmelCase__ : int = layer_norm_epsilon
UpperCAmelCase__ : Tuple = initializer_range
UpperCAmelCase__ : Union[str, Any] = use_cache
super().__init__(**_A )
| 75 | 1 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCamelCase__ = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
UpperCamelCase__ = {
'''allenai/led-base-16384''': 1_6_3_8_4,
}
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = LEDTokenizer
lowerCAmelCase__ = ['input_ids', 'attention_mask']
def __init__( self : Dict , _A : Tuple=None , _A : Union[str, Any]=None , _A : str=None , _A : List[Any]="replace" , _A : Dict="<s>" , _A : Dict="</s>" , _A : Tuple="</s>" , _A : int="<s>" , _A : Optional[Any]="<unk>" , _A : str="<pad>" , _A : int="<mask>" , _A : int=False , _A : str=True , **_A : Optional[int] , ):
'''simple docstring'''
super().__init__(
_A , _A , tokenizer_file=_A , errors=_A , bos_token=_A , eos_token=_A , sep_token=_A , cls_token=_A , unk_token=_A , pad_token=_A , mask_token=_A , add_prefix_space=_A , trim_offsets=_A , **_A , )
UpperCAmelCase__ : Union[str, Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , _A ) != add_prefix_space:
UpperCAmelCase__ : Dict = getattr(_A , pre_tok_state.pop('''type''' ) )
UpperCAmelCase__ : int = add_prefix_space
UpperCAmelCase__ : Dict = pre_tok_class(**_A )
UpperCAmelCase__ : Optional[Any] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
UpperCAmelCase__ : Optional[Any] = '''post_processor'''
UpperCAmelCase__ : Tuple = getattr(self.backend_tokenizer , _A , _A )
if tokenizer_component_instance:
UpperCAmelCase__ : List[Any] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
UpperCAmelCase__ : Optional[int] = tuple(state['''sep'''] )
if "cls" in state:
UpperCAmelCase__ : str = tuple(state['''cls'''] )
UpperCAmelCase__ : Dict = False
if state.get('''add_prefix_space''' , _A ) != add_prefix_space:
UpperCAmelCase__ : List[str] = add_prefix_space
UpperCAmelCase__ : List[str] = True
if state.get('''trim_offsets''' , _A ) != trim_offsets:
UpperCAmelCase__ : List[str] = trim_offsets
UpperCAmelCase__ : Optional[Any] = True
if changes_to_apply:
UpperCAmelCase__ : Optional[Any] = getattr(_A , state.pop('''type''' ) )
UpperCAmelCase__ : Union[str, Any] = component_class(**_A )
setattr(self.backend_tokenizer , _A , _A )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def lowercase_ ( self : Any ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def lowercase_ ( self : List[str] , _A : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : str = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else value
UpperCAmelCase__ : Optional[Any] = value
def lowercase_ ( self : Union[str, Any] , *_A : List[str] , **_A : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = kwargs.get('''is_split_into_words''' , _A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*_A , **_A )
def lowercase_ ( self : str , *_A : List[str] , **_A : str ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = kwargs.get('''is_split_into_words''' , _A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*_A , **_A )
def lowercase_ ( self : Any , _A : str , _A : Optional[str] = None ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self._tokenizer.model.save(_A , name=_A )
return tuple(_A )
def lowercase_ ( self : Union[str, Any] , _A : int , _A : str=None ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowercase_ ( self : Union[str, Any] , _A : List[int] , _A : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = [self.sep_token_id]
UpperCAmelCase__ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowercase_ ( self : Dict , _A : Union[Dict[str, EncodedInput], BatchEncoding] , _A : Optional[int] = None , _A : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , _A : Optional[int] = None , _A : Optional[bool] = None , ):
'''simple docstring'''
UpperCAmelCase__ : int = super()._pad(
encoded_inputs=_A , max_length=_A , padding_strategy=_A , pad_to_multiple_of=_A , return_attention_mask=_A , )
# Load from model defaults
if return_attention_mask is None:
UpperCAmelCase__ : Tuple = '''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
UpperCAmelCase__ : int = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` needs to have the same length as other (sequential) inputs.
UpperCAmelCase__ : int = len(encoded_inputs['''global_attention_mask'''] ) != len(_A )
if needs_to_be_padded:
UpperCAmelCase__ : int = len(_A ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
UpperCAmelCase__ : Tuple = (
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
UpperCAmelCase__ : Optional[Any] = [-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
| 75 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because it should only be run when releasing a minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'pytorch',
'script': 'run_ddp.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'tensorflow',
'script': 'run_tf_dist.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
},
] )
class lowerCamelCase_ ( unittest.TestCase ):
def lowercase_ ( self : List[str] ):
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=_A , )
assert hasattr(self , '''env''' )
def lowercase_ ( self : List[Any] , _A : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = f"""{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}"""
# distributed data settings
UpperCAmelCase__ : int = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=_A , instance_count=_A , instance_type=self.instance_type , debugger_hook_config=_A , hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=_A , py_version='''py36''' , )
def lowercase_ ( self : Optional[int] , _A : Any ):
'''simple docstring'''
TrainingJobAnalytics(_A ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(2,)] )
def lowercase_ ( self : Optional[int] , _A : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.create_estimator(_A )
# run training
estimator.fit()
# result dataframe
UpperCAmelCase__ : Union[str, Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
UpperCAmelCase__ : Dict = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
UpperCAmelCase__ : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job; this includes starting, preprocessing, stopping
UpperCAmelCase__ : Any = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 999_999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump test results into a json file to share in the PR
with open(f"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , _A )
| 75 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase__ = {'''configuration_glpn''': ['''GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GLPNConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ['''GLPNFeatureExtractor''']
UpperCamelCase__ = ['''GLPNImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'''GLPN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GLPNForDepthEstimation''',
'''GLPNLayer''',
'''GLPNModel''',
'''GLPNPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 75 |
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
UpperCamelCase__ = '''\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
'''
UpperCamelCase__ = '''\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
'''
UpperCamelCase__ = R'''
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTeX.
references: list of references, one for each prediction. Each
reference is a string that contains natural language
and LaTeX.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting "1/2" to "\\frac{1}{2}")
Examples:
>>> metric = datasets.load_metric("competition_math")
>>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
>>> print(results)
{\'accuracy\': 1.0}
'''
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase_ ( datasets.Metric ):
def lowercase_ ( self : Dict ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ),
'''references''': datasets.Value('''string''' ),
} ) , homepage='''https://github.com/hendrycks/math''' , codebase_urls=['''https://github.com/hendrycks/math'''] , )
def lowercase_ ( self : Any , _A : str , _A : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = 0.0
for i, j in zip(_A , _A ):
n_correct += 1.0 if math_equivalence.is_equiv(_A , _A ) else 0.0
UpperCAmelCase__ : Dict = n_correct / len(_A )
return {
"accuracy": accuracy,
}
| 75 | 1 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def a__ ( lowerCAmelCase__ , lowerCAmelCase__=False ) -> Dict:
UpperCAmelCase__ : int = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""module.blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""module.blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''module.cls_token''', '''vit.embeddings.cls_token'''),
('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''module.pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''module.norm.weight''', '''layernorm.weight'''),
('''module.norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
UpperCAmelCase__ : Dict = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ) -> str:
for i in range(config.num_hidden_layers ):
if base_model:
UpperCAmelCase__ : str = ''''''
else:
UpperCAmelCase__ : Union[str, Any] = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCAmelCase__ : Optional[int] = state_dict.pop(F"""module.blocks.{i}.attn.qkv.weight""" )
UpperCAmelCase__ : Optional[int] = state_dict.pop(F"""module.blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
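        # (in timm/MSN the fused qkv weight has shape [3 * hidden_size, hidden_size],
        # stacked as q, k, v along dim 0, so each third below becomes one separate
        # HF projection)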
UpperCAmelCase__ : Union[str, Any] = in_proj_weight[
: config.hidden_size, :
]
UpperCAmelCase__ : Optional[int] = in_proj_bias[: config.hidden_size]
UpperCAmelCase__ : int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase__ : List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCAmelCase__ : Dict = in_proj_weight[
-config.hidden_size :, :
]
UpperCAmelCase__ : Optional[int] = in_proj_bias[-config.hidden_size :]
def a__ ( lowerCAmelCase__ ) -> Union[str, Any]:
UpperCAmelCase__ : Optional[Any] = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(lowerCAmelCase__ , lowerCAmelCase__ )
def a__ ( lowerCAmelCase__ ) -> Any:
    # The projection head is only used during self-supervised pre-training in MSN;
    # it is not needed for downstream tasks, so its weights are dropped here.
UpperCAmelCase__ : List[str] = [
'''module.fc.fc1.weight''',
'''module.fc.fc1.bias''',
'''module.fc.bn1.weight''',
'''module.fc.bn1.bias''',
'''module.fc.bn1.running_mean''',
'''module.fc.bn1.running_var''',
'''module.fc.bn1.num_batches_tracked''',
'''module.fc.fc2.weight''',
'''module.fc.fc2.bias''',
'''module.fc.bn2.weight''',
'''module.fc.bn2.bias''',
'''module.fc.bn2.running_mean''',
'''module.fc.bn2.running_var''',
'''module.fc.bn2.num_batches_tracked''',
'''module.fc.fc3.weight''',
'''module.fc.fc3.bias''',
]
for k in ignore_keys:
state_dict.pop(lowerCAmelCase__ , lowerCAmelCase__ )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int:
UpperCAmelCase__ : List[Any] = dct.pop(lowerCAmelCase__ )
UpperCAmelCase__ : Optional[int] = val
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
UpperCAmelCase__ : str = ViTMSNConfig()
UpperCAmelCase__ : Optional[int] = 10_00
UpperCAmelCase__ : List[Any] = '''datasets/huggingface/label-files'''
UpperCAmelCase__ : str = '''imagenet-1k-id2label.json'''
UpperCAmelCase__ : List[str] = json.load(open(hf_hub_download(lowerCAmelCase__ , lowerCAmelCase__ ) , '''r''' ) )
    UpperCAmelCase__ : Union[str, Any] = {int(k ): v for k, v in idalabel.items()}
UpperCAmelCase__ : List[str] = idalabel
UpperCAmelCase__ : Optional[int] = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
UpperCAmelCase__ : List[str] = 3_84
UpperCAmelCase__ : Dict = 15_36
UpperCAmelCase__ : Tuple = 6
elif "l16" in checkpoint_url:
UpperCAmelCase__ : List[str] = 10_24
UpperCAmelCase__ : str = 40_96
UpperCAmelCase__ : Tuple = 24
UpperCAmelCase__ : Optional[int] = 16
UpperCAmelCase__ : Optional[int] = 0.1
elif "b4" in checkpoint_url:
UpperCAmelCase__ : List[str] = 4
elif "l7" in checkpoint_url:
UpperCAmelCase__ : int = 7
UpperCAmelCase__ : int = 10_24
UpperCAmelCase__ : Dict = 40_96
UpperCAmelCase__ : Any = 24
UpperCAmelCase__ : str = 16
UpperCAmelCase__ : Any = 0.1
UpperCAmelCase__ : str = ViTMSNModel(lowerCAmelCase__ )
UpperCAmelCase__ : Optional[Any] = torch.hub.load_state_dict_from_url(lowerCAmelCase__ , map_location='''cpu''' )['''target_encoder''']
UpperCAmelCase__ : Union[str, Any] = ViTImageProcessor(size=config.image_size )
remove_projection_head(lowerCAmelCase__ )
UpperCAmelCase__ : List[Any] = create_rename_keys(lowerCAmelCase__ , base_model=lowerCAmelCase__ )
for src, dest in rename_keys:
rename_key(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
read_in_q_k_v(lowerCAmelCase__ , lowerCAmelCase__ , base_model=lowerCAmelCase__ )
model.load_state_dict(lowerCAmelCase__ )
model.eval()
UpperCAmelCase__ : Dict = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
UpperCAmelCase__ : Tuple = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw )
UpperCAmelCase__ : Tuple = ViTImageProcessor(
size=config.image_size , image_mean=lowerCAmelCase__ , image_std=lowerCAmelCase__ )
UpperCAmelCase__ : Optional[Any] = image_processor(images=lowerCAmelCase__ , return_tensors='''pt''' )
# forward pass
torch.manual_seed(2 )
UpperCAmelCase__ : Optional[Any] = model(**lowerCAmelCase__ )
UpperCAmelCase__ : Tuple = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
UpperCAmelCase__ : Any = torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]] )
elif "b16" in checkpoint_url:
UpperCAmelCase__ : Optional[int] = torch.tensor([[1_4.2_8_8_9, -1_8.9_0_4_5, 1_1.7_2_8_1]] )
elif "l16" in checkpoint_url:
UpperCAmelCase__ : int = torch.tensor([[4_1.5_0_2_8, -2_2.8_6_8_1, 4_5.6_4_7_5]] )
elif "b4" in checkpoint_url:
UpperCAmelCase__ : str = torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]] )
else:
UpperCAmelCase__ : Tuple = torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , lowerCAmelCase__ , atol=1E-4 )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCAmelCase__ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
UpperCamelCase__ = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
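    # Example invocation (a sketch; the script filename is an assumption):
    #   python convert_vit_msn_to_pytorch.py \
    #       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
    #       --pytorch_dump_folder_path ./vit-msn-small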
| 75 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all BART models at https://huggingface.co/models?filter=bart
UpperCamelCase__ = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json''',
},
}
UpperCamelCase__ = {
'''facebook/bart-base''': 1_0_2_4,
'''facebook/bart-large''': 1_0_2_4,
'''facebook/bart-large-mnli''': 1_0_2_4,
'''facebook/bart-large-cnn''': 1_0_2_4,
'''facebook/bart-large-xsum''': 1_0_2_4,
'''yjernite/bart_eli5''': 1_0_2_4,
}
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ['input_ids', 'attention_mask']
lowerCAmelCase__ = BartTokenizer
def __init__( self : Tuple , _A : List[str]=None , _A : Optional[Any]=None , _A : Union[str, Any]=None , _A : Tuple="replace" , _A : Optional[Any]="<s>" , _A : int="</s>" , _A : Optional[Any]="</s>" , _A : List[str]="<s>" , _A : Optional[int]="<unk>" , _A : Optional[int]="<pad>" , _A : str="<mask>" , _A : Dict=False , _A : int=True , **_A : Optional[Any] , ):
'''simple docstring'''
super().__init__(
_A , _A , tokenizer_file=_A , errors=_A , bos_token=_A , eos_token=_A , sep_token=_A , cls_token=_A , unk_token=_A , pad_token=_A , mask_token=_A , add_prefix_space=_A , trim_offsets=_A , **_A , )
UpperCAmelCase__ : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , _A ) != add_prefix_space:
UpperCAmelCase__ : str = getattr(_A , pre_tok_state.pop('''type''' ) )
UpperCAmelCase__ : Any = add_prefix_space
UpperCAmelCase__ : str = pre_tok_class(**_A )
UpperCAmelCase__ : Dict = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
UpperCAmelCase__ : Optional[Any] = '''post_processor'''
UpperCAmelCase__ : List[Any] = getattr(self.backend_tokenizer , _A , _A )
if tokenizer_component_instance:
UpperCAmelCase__ : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
UpperCAmelCase__ : Union[str, Any] = tuple(state['''sep'''] )
if "cls" in state:
UpperCAmelCase__ : Union[str, Any] = tuple(state['''cls'''] )
UpperCAmelCase__ : Dict = False
if state.get('''add_prefix_space''' , _A ) != add_prefix_space:
UpperCAmelCase__ : Union[str, Any] = add_prefix_space
UpperCAmelCase__ : Dict = True
if state.get('''trim_offsets''' , _A ) != trim_offsets:
UpperCAmelCase__ : List[Any] = trim_offsets
UpperCAmelCase__ : List[Any] = True
if changes_to_apply:
UpperCAmelCase__ : Dict = getattr(_A , state.pop('''type''' ) )
UpperCAmelCase__ : Union[str, Any] = component_class(**_A )
setattr(self.backend_tokenizer , _A , _A )
@property
def lowercase_ ( self : Dict ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def lowercase_ ( self : Dict , _A : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else value
UpperCAmelCase__ : str = value
def lowercase_ ( self : Optional[int] , *_A : List[str] , **_A : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Any = kwargs.get('''is_split_into_words''' , _A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*_A , **_A )
def lowercase_ ( self : Optional[Any] , *_A : Union[str, Any] , **_A : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = kwargs.get('''is_split_into_words''' , _A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*_A , **_A )
def lowercase_ ( self : Optional[int] , _A : str , _A : Optional[str] = None ):
'''simple docstring'''
UpperCAmelCase__ : str = self._tokenizer.model.save(_A , name=_A )
return tuple(_A )
def lowercase_ ( self : Tuple , _A : Union[str, Any] , _A : Optional[int]=None ):
'''simple docstring'''
UpperCAmelCase__ : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowercase_ ( self : int , _A : List[int] , _A : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = [self.sep_token_id]
UpperCAmelCase__ : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
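# Minimal usage sketch (upstream this class is BartTokenizerFast; the names in
# the snippet above are anonymized placeholders):
#   tok = BartTokenizerFast.from_pretrained("facebook/bart-base")
#   enc = tok("Hello world", return_tensors="pt")
#   tok.decode(enc["input_ids"][0])  # -> "<s>Hello world</s>"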
| 75 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class lowerCamelCase_ :
def __init__( self : List[str] , _A : List[Any] , _A : int=13 , _A : List[Any]=7 , _A : Optional[Any]=6 , _A : Tuple=17 , _A : Union[str, Any]=23 , _A : str=11 , _A : Dict=True , ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = parent
UpperCAmelCase__ : Tuple = batch_size
UpperCAmelCase__ : Optional[Any] = seq_length
UpperCAmelCase__ : Tuple = act_dim
UpperCAmelCase__ : Dict = state_dim
UpperCAmelCase__ : int = hidden_size
UpperCAmelCase__ : Dict = max_length
UpperCAmelCase__ : Any = is_training
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
UpperCAmelCase__ : Dict = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
UpperCAmelCase__ : Optional[Any] = floats_tensor((self.batch_size, self.seq_length, 1) )
UpperCAmelCase__ : str = floats_tensor((self.batch_size, self.seq_length, 1) )
UpperCAmelCase__ : str = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1_000 )
UpperCAmelCase__ : Optional[Any] = random_attention_mask((self.batch_size, self.seq_length) )
UpperCAmelCase__ : Tuple = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def lowercase_ ( self : Dict ):
'''simple docstring'''
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def lowercase_ ( self : int , _A : int , _A : Optional[int] , _A : int , _A : int , _A : Union[str, Any] , _A : List[str] , _A : List[str] , ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = DecisionTransformerModel(config=_A )
model.to(_A )
model.eval()
UpperCAmelCase__ : List[Any] = model(_A , _A , _A , _A , _A , _A )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) )  # seq length * 3, as there are 3 modalities: states, returns and actions
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.prepare_config_and_inputs()
        UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = config_and_inputs
UpperCAmelCase__ : Optional[int] = {
'''states''': states,
'''actions''': actions,
'''rewards''': rewards,
'''returns_to_go''': returns_to_go,
'''timesteps''': timesteps,
'''attention_mask''': attention_mask,
}
return config, inputs_dict
@require_torch
class lowerCamelCase_ ( __a , __a , __a , unittest.TestCase ):
lowerCAmelCase__ = (DecisionTransformerModel,) if is_torch_available() else ()
lowerCAmelCase__ = ()
lowerCAmelCase__ = {'feature-extraction': DecisionTransformerModel} if is_torch_available() else {}
    # Ignore a failing test from GenerationTesterMixin, as the model does not use input_ids
lowerCAmelCase__ = False
    # Ignore failing tests from ModelTesterMixin, as the model does not implement these features
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = DecisionTransformerModelTester(self )
UpperCAmelCase__ : str = ConfigTester(self , config_class=_A , hidden_size=37 )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
@slow
def lowercase_ ( self : Tuple ):
'''simple docstring'''
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : List[str] = DecisionTransformerModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : int = model_class(_A )
UpperCAmelCase__ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : Dict = [*signature.parameters.keys()]
UpperCAmelCase__ : str = [
'''states''',
'''actions''',
'''rewards''',
'''returns_to_go''',
'''timesteps''',
'''attention_mask''',
]
self.assertListEqual(arg_names[: len(_A )] , _A )
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
@slow
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = 2 # number of steps of autoregressive prediction we will perform
UpperCAmelCase__ : Dict = 10 # defined by the RL environment, may be normalized
UpperCAmelCase__ : Optional[int] = DecisionTransformerModel.from_pretrained('''edbeeching/decision-transformer-gym-hopper-expert''' )
UpperCAmelCase__ : Union[str, Any] = model.to(_A )
UpperCAmelCase__ : str = model.config
torch.manual_seed(0 )
UpperCAmelCase__ : str = torch.randn(1 , 1 , config.state_dim ).to(device=_A , dtype=torch.floataa ) # env.reset()
UpperCAmelCase__ : Optional[int] = torch.tensor(
[[0.2_4_2_7_9_3, -0.2_8_6_9_3_0_7_4, 0.8_7_4_2_6_1_3], [0.6_7_8_1_5_2_7_4, -0.0_8_1_0_1_0_8_5, -0.1_2_9_5_2_1_4_7]] , device=_A )
UpperCAmelCase__ : Tuple = torch.tensor(_A , device=_A , dtype=torch.floataa ).reshape(1 , 1 , 1 )
UpperCAmelCase__ : Tuple = state
UpperCAmelCase__ : Union[str, Any] = torch.zeros(1 , 0 , config.act_dim , device=_A , dtype=torch.floataa )
UpperCAmelCase__ : int = torch.zeros(1 , 0 , device=_A , dtype=torch.floataa )
UpperCAmelCase__ : Union[str, Any] = torch.tensor(0 , device=_A , dtype=torch.long ).reshape(1 , 1 )
for step in range(_A ):
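            # Decision Transformer rollout: append zero placeholders for the current
            # action and reward, run a forward pass to predict the action, then write
            # the prediction back and extend states / returns-to-go for the next step.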
UpperCAmelCase__ : Optional[Any] = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=_A )] , dim=1 )
UpperCAmelCase__ : Union[str, Any] = torch.cat([rewards, torch.zeros(1 , 1 , device=_A )] , dim=1 )
UpperCAmelCase__ : List[str] = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Any = model(
states=_A , actions=_A , rewards=_A , returns_to_go=_A , timesteps=_A , attention_mask=_A , return_dict=_A , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Dict = ( # env.step(action)
torch.randn(1 , 1 , config.state_dim ).to(device=_A , dtype=torch.floataa ),
1.0,
False,
{},
)
UpperCAmelCase__ : Optional[Any] = action_pred[0, -1]
UpperCAmelCase__ : Tuple = torch.cat([states, state] , dim=1 )
UpperCAmelCase__ : Any = returns_to_go[0, -1] - reward
UpperCAmelCase__ : Union[str, Any] = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
UpperCAmelCase__ : Optional[int] = torch.cat(
[timesteps, torch.ones((1, 1) , device=_A , dtype=torch.long ) * (step + 1)] , dim=1 )
| 75 |
'''simple docstring'''
import random
from typing import Any
def a__ ( lowerCAmelCase__ ) -> list[Any]:
    # Fisher-Yates shuffle: walk from the last index down to 1 and swap each
    # element with a uniformly chosen element at or before it. (Drawing two
    # independent random indices per step, as before, yields a biased shuffle.)
    for a in range(len(lowerCAmelCase__ ) - 1 , 0 , -1 ):
        b = random.randint(0 , a )
        lowerCAmelCase__[a], lowerCAmelCase__[b] = lowerCAmelCase__[b], lowerCAmelCase__[a]
    return lowerCAmelCase__
if __name__ == "__main__":
UpperCamelCase__ = [0, 1, 2, 3, 4, 5, 6, 7]
UpperCamelCase__ = ['''python''', '''says''', '''hello''', '''!''']
print('''Fisher-Yates Shuffle:''')
print('''List''', integers, strings)
print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 75 | 1 |
'''simple docstring'''
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
UpperCAmelCase__ : list[list[str]] = [[] for _ in range(lowerCAmelCase__ )]
UpperCAmelCase__ : Union[str, Any] = key - 1
if key <= 0:
raise ValueError('''Height of grid can\'t be 0 or negative''' )
if key == 1 or len(lowerCAmelCase__ ) <= key:
return input_string
for position, character in enumerate(lowerCAmelCase__ ):
UpperCAmelCase__ : Dict = position % (lowest * 2) # puts it in bounds
UpperCAmelCase__ : Tuple = min(lowerCAmelCase__ , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append(lowerCAmelCase__ )
UpperCAmelCase__ : int = [''''''.join(lowerCAmelCase__ ) for row in temp_grid]
UpperCAmelCase__ : Dict = ''''''.join(lowerCAmelCase__ )
return output_string
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
UpperCAmelCase__ : Optional[Any] = []
UpperCAmelCase__ : List[Any] = key - 1
if key <= 0:
raise ValueError('''Height of grid can\'t be 0 or negative''' )
if key == 1:
return input_string
UpperCAmelCase__ : list[list[str]] = [[] for _ in range(lowerCAmelCase__ )] # generates template
for position in range(len(lowerCAmelCase__ ) ):
UpperCAmelCase__ : Optional[int] = position % (lowest * 2) # puts it in bounds
UpperCAmelCase__ : Optional[int] = min(lowerCAmelCase__ , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append('''*''' )
UpperCAmelCase__ : Tuple = 0
for row in temp_grid: # fills in the characters
UpperCAmelCase__ : Tuple = input_string[counter : counter + len(lowerCAmelCase__ )]
grid.append(list(lowerCAmelCase__ ) )
counter += len(lowerCAmelCase__ )
UpperCAmelCase__ : List[str] = '''''' # reads as zigzag
for position in range(len(lowerCAmelCase__ ) ):
UpperCAmelCase__ : int = position % (lowest * 2) # puts it in bounds
UpperCAmelCase__ : int = min(lowerCAmelCase__ , lowest * 2 - num ) # creates zigzag pattern
output_string += grid[num][0]
grid[num].pop(0 )
return output_string
def a__ ( lowerCAmelCase__ ) -> dict[int, str]:
UpperCAmelCase__ : Tuple = {}
for key_guess in range(1 , len(lowerCAmelCase__ ) ): # tries every key
UpperCAmelCase__ : Any = decrypt(lowerCAmelCase__ , lowerCAmelCase__ )
return results
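# Round-trip sketch for the rail-fence helpers above (upstream names such as
# encrypt/decrypt/bruteforce are anonymized here). Classic example with key=3:
#   encrypt("WEAREDISCOVEREDFLEEATONCE", 3) == "WECRLTEERDSOEEFEAOCAIVDEN"
#   decrypt("WECRLTEERDSOEEFEAOCAIVDEN", 3) == "WEAREDISCOVEREDFLEEATONCE"
#   bruteforce(ciphertext) maps every candidate key to its decryption.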
if __name__ == "__main__":
import doctest
doctest.testmod()
| 75 |
'''simple docstring'''
import math
def a__ ( lowerCAmelCase__ ) -> list[int]:
UpperCAmelCase__ : List[Any] = []
UpperCAmelCase__ : Dict = 2
UpperCAmelCase__ : Optional[Any] = int(math.sqrt(lowerCAmelCase__ ) ) # Size of every segment
UpperCAmelCase__ : str = [True] * (end + 1)
UpperCAmelCase__ : Any = []
while start <= end:
if temp[start] is True:
in_prime.append(lowerCAmelCase__ )
for i in range(start * start , end + 1 , lowerCAmelCase__ ):
UpperCAmelCase__ : Dict = False
start += 1
prime += in_prime
UpperCAmelCase__ : Optional[int] = end + 1
UpperCAmelCase__ : str = min(2 * end , lowerCAmelCase__ )
while low <= n:
UpperCAmelCase__ : List[str] = [True] * (high - low + 1)
for each in in_prime:
UpperCAmelCase__ : List[str] = math.floor(low / each ) * each
if t < low:
t += each
for j in range(lowerCAmelCase__ , high + 1 , lowerCAmelCase__ ):
UpperCAmelCase__ : Union[str, Any] = False
for j in range(len(lowerCAmelCase__ ) ):
if temp[j] is True:
prime.append(j + low )
UpperCAmelCase__ : Union[str, Any] = high + 1
UpperCAmelCase__ : str = min(high + end , lowerCAmelCase__ )
return prime
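# Example (small input): sieve(30) -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]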
print(sieve(1_0**6))
| 75 | 1 |
'''simple docstring'''
import operator as op
UpperCamelCase__ = '''scaler.pt'''
UpperCamelCase__ = '''pytorch_model'''
UpperCamelCase__ = '''random_states'''
UpperCamelCase__ = '''optimizer'''
UpperCamelCase__ = '''scheduler'''
UpperCamelCase__ = '''pytorch_model.bin'''
UpperCamelCase__ = '''pytorch_model.bin.index.json'''
UpperCamelCase__ = '''model.safetensors'''
UpperCamelCase__ = '''model.safetensors.index.json'''
UpperCamelCase__ = '''1.10.2'''
UpperCamelCase__ = '''py38'''
UpperCamelCase__ = '''4.17.0'''
UpperCamelCase__ = ['''ml.p3.16xlarge''', '''ml.p3dn.24xlarge''', '''ml.p4dn.24xlarge''']
UpperCamelCase__ = ['''FULL_SHARD''', '''SHARD_GRAD_OP''', '''NO_SHARD''', '''HYBRID_SHARD''', '''HYBRID_SHARD_ZERO2''']
UpperCamelCase__ = ['''TRANSFORMER_BASED_WRAP''', '''SIZE_BASED_WRAP''', '''NO_WRAP''']
UpperCamelCase__ = ['''BACKWARD_PRE''', '''BACKWARD_POST''', '''NO_PREFETCH''']
UpperCamelCase__ = ['''FULL_STATE_DICT''', '''LOCAL_STATE_DICT''', '''SHARDED_STATE_DICT''']
UpperCamelCase__ = '''2.0.1'''
UpperCamelCase__ = ['''pdsh''', '''standard''', '''openmpi''', '''mvapich''']
UpperCamelCase__ = ['''default''', '''reduce-overhead''', '''max-autotune''']
UpperCamelCase__ = {'''>''': op.gt, '''>=''': op.ge, '''==''': op.eq, '''!=''': op.ne, '''<=''': op.le, '''<''': op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
UpperCamelCase__ = [
'''nnodes''',
'''nproc_per_node''',
'''rdzv_backend''',
'''rdzv_endpoint''',
'''rdzv_id''',
'''rdzv_conf''',
'''standalone''',
'''max_restarts''',
'''monitor_interval''',
'''start_method''',
'''role''',
'''module''',
'''m''',
'''no_python''',
'''run_path''',
'''log_dir''',
'''r''',
'''redirects''',
'''t''',
'''tee''',
'''node_rank''',
'''master_addr''',
'''master_port''',
]
UpperCamelCase__ = ['''DEEPSPEED''', '''MULTI_GPU''', '''FSDP''', '''MEGATRON_LM''']
UpperCamelCase__ = ['''DEEPSPEED''', '''MULTI_XPU''', '''FSDP''']
| 75 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase_ ( __a , __a , __a , unittest.TestCase ):
lowerCAmelCase__ = StableDiffusionInpaintPipeline
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
lowerCAmelCase__ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowerCAmelCase__ = frozenset([] )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase__ : List[str] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_A , )
UpperCAmelCase__ : int = PNDMScheduler(skip_prk_steps=_A )
torch.manual_seed(0 )
UpperCAmelCase__ : str = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
UpperCAmelCase__ : Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='''gelu''' , projection_dim=512 , )
UpperCAmelCase__ : Union[str, Any] = CLIPTextModel(_A )
UpperCAmelCase__ : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
UpperCAmelCase__ : str = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def lowercase_ ( self : str , _A : Dict , _A : Any=0 ):
'''simple docstring'''
UpperCAmelCase__ : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
UpperCAmelCase__ : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase__ : Optional[int] = Image.fromarray(np.uinta(_A ) ).convert('''RGB''' ).resize((64, 64) )
UpperCAmelCase__ : int = Image.fromarray(np.uinta(image + 4 ) ).convert('''RGB''' ).resize((64, 64) )
if str(_A ).startswith('''mps''' ):
UpperCAmelCase__ : List[Any] = torch.manual_seed(_A )
else:
UpperCAmelCase__ : str = torch.Generator(device=_A ).manual_seed(_A )
UpperCAmelCase__ : Optional[int] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': init_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase__ : Tuple = self.get_dummy_components()
UpperCAmelCase__ : str = StableDiffusionInpaintPipeline(**_A )
UpperCAmelCase__ : List[str] = sd_pipe.to(_A )
sd_pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase__ : Dict = self.get_dummy_inputs(_A )
UpperCAmelCase__ : Any = sd_pipe(**_A ).images
UpperCAmelCase__ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase__ : int = np.array([0.4_7_2_7, 0.5_7_3_5, 0.3_9_4_1, 0.5_4_4_6, 0.5_9_2_6, 0.4_3_9_4, 0.5_0_6_2, 0.4_6_5_4, 0.4_4_7_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase_ ( self : Tuple ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
UpperCAmelCase__ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
UpperCAmelCase__ : List[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench.npy''' )
UpperCAmelCase__ : Dict = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCAmelCase__ : Optional[int] = StableDiffusionInpaintPipeline.from_pretrained(_A , safety_checker=_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
UpperCAmelCase__ : Tuple = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCAmelCase__ : str = torch.manual_seed(0 )
UpperCAmelCase__ : str = pipe(
prompt=_A , image=_A , mask_image=_A , generator=_A , output_type='''np''' , )
UpperCAmelCase__ : int = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9e-3
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
UpperCAmelCase__ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
UpperCAmelCase__ : Union[str, Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench_fp16.npy''' )
UpperCAmelCase__ : Tuple = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCAmelCase__ : Any = StableDiffusionInpaintPipeline.from_pretrained(
_A , torch_dtype=torch.floataa , safety_checker=_A , )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
UpperCAmelCase__ : Tuple = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCAmelCase__ : Union[str, Any] = torch.manual_seed(0 )
UpperCAmelCase__ : Optional[Any] = pipe(
prompt=_A , image=_A , mask_image=_A , generator=_A , output_type='''np''' , )
UpperCAmelCase__ : Tuple = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def lowercase_ ( self : Any ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCAmelCase__ : Union[str, Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
UpperCAmelCase__ : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
UpperCAmelCase__ : Optional[Any] = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCAmelCase__ : str = PNDMScheduler.from_pretrained(_A , subfolder='''scheduler''' )
UpperCAmelCase__ : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(
_A , safety_checker=_A , scheduler=_A , torch_dtype=torch.floataa , )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
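        # Attention slicing plus sequential CPU offload trade inference speed for a
        # much smaller peak GPU allocation, which the assertion below verifies.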
UpperCAmelCase__ : Optional[int] = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCAmelCase__ : Tuple = torch.manual_seed(0 )
UpperCAmelCase__ : Any = pipe(
prompt=_A , image=_A , mask_image=_A , generator=_A , num_inference_steps=2 , output_type='''np''' , )
UpperCAmelCase__ : int = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.6_5 * 10**9
| 75 | 1 |
'''simple docstring'''
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
UpperCamelCase__ = {
'''debug''': logging.DEBUG,
'''info''': logging.INFO,
'''warning''': logging.WARNING,
'''error''': logging.ERROR,
'''critical''': logging.CRITICAL,
}
UpperCamelCase__ = logging.WARNING
def a__ ( ) -> Optional[int]:
    UpperCAmelCase__ : int = os.getenv('''DATASETS_VERBOSITY''' , None )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
F"""Unknown option DATASETS_VERBOSITY={env_level_str}, """
F"""has to be one of: { ", ".join(log_levels.keys() ) }""" )
return _default_log_level
def a__ ( ) -> str:
return __name__.split('''.''' )[0]
def a__ ( ) -> logging.Logger:
return logging.getLogger(_get_library_name() )
def a__ ( ) -> None:
# Apply our default configuration to the library root logger.
UpperCAmelCase__ : Optional[Any] = _get_library_root_logger()
library_root_logger.setLevel(_get_default_logging_level() )
def a__ ( ) -> None:
UpperCAmelCase__ : str = _get_library_root_logger()
library_root_logger.setLevel(logging.NOTSET )
def a__ ( lowerCAmelCase__ = None ) -> logging.Logger:
if name is None:
UpperCAmelCase__ : int = _get_library_name()
return logging.getLogger(lowerCAmelCase__ )
def a__ ( ) -> int:
return _get_library_root_logger().getEffectiveLevel()
def a__ ( lowerCAmelCase__ ) -> None:
_get_library_root_logger().setLevel(lowerCAmelCase__ )
def a__ ( ) -> Dict:
return set_verbosity(lowerCAmelCase__ )
def a__ ( ) -> Any:
return set_verbosity(lowerCAmelCase__ )
def a__ ( ) -> Union[str, Any]:
return set_verbosity(lowerCAmelCase__ )
def a__ ( ) -> Dict:
return set_verbosity(lowerCAmelCase__ )
def a__ ( ) -> None:
UpperCAmelCase__ : Dict = False
def a__ ( ) -> None:
UpperCAmelCase__ : Tuple = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
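# Typical usage sketch (assuming this module ships as datasets.utils.logging;
# the helper names above are anonymized, upstream they are set_verbosity,
# get_logger, etc.):
#   from datasets.utils import logging
#   logging.set_verbosity(logging.INFO)   # or run with DATASETS_VERBOSITY=info
#   logger = logging.get_logger(__name__)
#   logger.info("dataset loaded")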
class lowerCamelCase_ :
def __init__( self : str , *_A : Dict , **_A : List[str] ): # pylint: disable=unused-argument
'''simple docstring'''
UpperCAmelCase__ : Any = args[0] if args else None
def __iter__( self : Union[str, Any] ):
'''simple docstring'''
return iter(self._iterator )
def __getattr__( self : Optional[Any] , _A : List[Any] ):
'''simple docstring'''
def empty_fn(*_A : List[Any] , **_A : Dict ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self : Dict ):
'''simple docstring'''
return self
def __exit__( self : int , _A : Dict , _A : Tuple , _A : List[Any] ):
'''simple docstring'''
return
UpperCamelCase__ = True
class lowerCamelCase_ :
def __call__( self : str , *_A : List[str] , _A : Tuple=False , **_A : int ):
'''simple docstring'''
if _tqdm_active and not disable:
return tqdm_lib.tqdm(*_A , **_A )
else:
return EmptyTqdm(*_A , **_A )
def lowercase_ ( self : Optional[Any] , *_A : List[str] , **_A : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*_A , **_A )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
UpperCamelCase__ = _tqdm_cls()
def a__ ( ) -> bool:
global _tqdm_active
return bool(_tqdm_active )
def a__ ( ) -> str:
global _tqdm_active
UpperCAmelCase__ : Union[str, Any] = True
def a__ ( ) -> Optional[int]:
global _tqdm_active
UpperCAmelCase__ : Dict = False
| 75 |
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
    # The slow tests often fail with an OOM error on GPU.
    # This makes JAX allocate exactly what is needed on demand and deallocate memory that is no
    # longer needed, but it is slower, as explained here: https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCamelCase__ = '''platform'''
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , ) -> Tuple:
if attention_mask is None:
UpperCAmelCase__ : List[Any] = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
UpperCAmelCase__ : Union[str, Any] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
UpperCAmelCase__ : Optional[Any] = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCAmelCase__ : Optional[int] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
UpperCAmelCase__ : Any = np.ones((config.decoder_layers, config.decoder_attention_heads) )
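    # Note: the head masks above are built for parity with the PyTorch test helper
    # but are deliberately not returned, since the Flax models do not accept them.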
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
class lowerCamelCase_ :
def __init__( self : Optional[Any] , _A : Optional[Any] , _A : str=13 , _A : int=7 , _A : Any=True , _A : List[Any]=False , _A : Optional[int]=99 , _A : Optional[int]=16 , _A : int=2 , _A : Optional[int]=4 , _A : Optional[int]=4 , _A : int="gelu" , _A : List[str]=0.1 , _A : str=0.1 , _A : int=32 , _A : Optional[int]=2 , _A : int=1 , _A : Dict=0 , _A : Dict=0.0_2 , ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = parent
UpperCAmelCase__ : str = batch_size
UpperCAmelCase__ : Dict = seq_length
UpperCAmelCase__ : str = is_training
UpperCAmelCase__ : int = use_labels
UpperCAmelCase__ : Union[str, Any] = vocab_size
UpperCAmelCase__ : Union[str, Any] = hidden_size
UpperCAmelCase__ : int = num_hidden_layers
UpperCAmelCase__ : Any = num_attention_heads
UpperCAmelCase__ : List[str] = intermediate_size
UpperCAmelCase__ : str = hidden_act
UpperCAmelCase__ : str = hidden_dropout_prob
UpperCAmelCase__ : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase__ : Union[str, Any] = max_position_embeddings
UpperCAmelCase__ : int = eos_token_id
UpperCAmelCase__ : Optional[int] = pad_token_id
UpperCAmelCase__ : List[str] = bos_token_id
UpperCAmelCase__ : Union[str, Any] = initializer_range
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Any = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
UpperCAmelCase__ : str = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
UpperCAmelCase__ : List[Any] = shift_tokens_right(_A , 1 , 2 )
UpperCAmelCase__ : List[Any] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_A , )
UpperCAmelCase__ : Tuple = prepare_blenderbot_inputs_dict(_A , _A , _A )
return config, inputs_dict
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.prepare_config_and_inputs()
return config, inputs_dict
def lowercase_ ( self : int , _A : List[Any] , _A : Optional[Any] , _A : int ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = 20
UpperCAmelCase__ : int = model_class_name(_A )
UpperCAmelCase__ : str = model.encode(inputs_dict['''input_ids'''] )
UpperCAmelCase__ , UpperCAmelCase__ : Dict = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
UpperCAmelCase__ : Tuple = model.init_cache(decoder_input_ids.shape[0] , _A , _A )
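        # Incremental decoding check: prime the cache on all but the last token,
        # then feed the final token alone and compare with a single full forward pass.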
UpperCAmelCase__ : Optional[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
UpperCAmelCase__ : str = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
UpperCAmelCase__ : str = model.decode(
decoder_input_ids[:, :-1] , _A , decoder_attention_mask=_A , past_key_values=_A , decoder_position_ids=_A , )
UpperCAmelCase__ : Dict = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
UpperCAmelCase__ : Tuple = model.decode(
decoder_input_ids[:, -1:] , _A , decoder_attention_mask=_A , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_A , )
UpperCAmelCase__ : int = model.decode(_A , _A )
UpperCAmelCase__ : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def lowercase_ ( self : Tuple , _A : List[Any] , _A : Tuple , _A : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = 20
UpperCAmelCase__ : Optional[int] = model_class_name(_A )
UpperCAmelCase__ : Optional[int] = model.encode(inputs_dict['''input_ids'''] )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
UpperCAmelCase__ : Any = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
UpperCAmelCase__ : List[Any] = model.init_cache(decoder_input_ids.shape[0] , _A , _A )
UpperCAmelCase__ : Optional[Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
UpperCAmelCase__ : int = model.decode(
decoder_input_ids[:, :-1] , _A , decoder_attention_mask=_A , past_key_values=_A , decoder_position_ids=_A , )
UpperCAmelCase__ : List[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
UpperCAmelCase__ : Any = model.decode(
decoder_input_ids[:, -1:] , _A , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_A , decoder_position_ids=_A , )
UpperCAmelCase__ : List[str] = model.decode(_A , _A , decoder_attention_mask=_A )
UpperCAmelCase__ : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class lowerCamelCase_ ( unittest.TestCase ):
lowerCAmelCase__ = 9_9
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
UpperCAmelCase__ : int = input_ids.shape[0]
UpperCAmelCase__ : List[str] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self._get_config_and_data()
UpperCAmelCase__ : Any = FlaxBlenderbotForConditionalGeneration(_A )
UpperCAmelCase__ : Optional[int] = lm_model(input_ids=_A )
UpperCAmelCase__ : Dict = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , _A )
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
UpperCAmelCase__ : Union[str, Any] = FlaxBlenderbotForConditionalGeneration(_A )
UpperCAmelCase__ : str = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
UpperCAmelCase__ : Any = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
UpperCAmelCase__ : Tuple = lm_model(input_ids=_A , decoder_input_ids=_A )
UpperCAmelCase__ : int = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , _A )
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Any = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
UpperCAmelCase__ : Union[str, Any] = shift_tokens_right(_A , 1 , 2 )
UpperCAmelCase__ : str = np.equal(_A , 1 ).astype(np.floataa ).sum()
UpperCAmelCase__ : Dict = np.equal(_A , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(_A , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class lowerCamelCase_ ( __a , unittest.TestCase , __a ):
lowerCAmelCase__ = True
lowerCAmelCase__ = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
lowerCAmelCase__ = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Dict = FlaxBlenderbotModelTester(self )
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_A , _A , _A )
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_A , _A , _A )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase__ : Dict = self._prepare_for_class(_A , _A )
UpperCAmelCase__ : str = model_class(_A )
@jax.jit
def encode_jitted(_A : Any , _A : Tuple=None , **_A : Optional[int] ):
return model.encode(input_ids=_A , attention_mask=_A )
with self.subTest('''JIT Enabled''' ):
UpperCAmelCase__ : Optional[Any] = encode_jitted(**_A ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
UpperCAmelCase__ : Tuple = encode_jitted(**_A ).to_tuple()
self.assertEqual(len(_A ) , len(_A ) )
for jitted_output, output in zip(_A , _A ):
self.assertEqual(jitted_output.shape , output.shape )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase__ : List[str] = model_class(_A )
UpperCAmelCase__ : Tuple = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
UpperCAmelCase__ : Tuple = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(_A : Optional[int] , _A : List[Any] , _A : int ):
return model.decode(
decoder_input_ids=_A , decoder_attention_mask=_A , encoder_outputs=_A , )
with self.subTest('''JIT Enabled''' ):
UpperCAmelCase__ : Any = decode_jitted(**_A ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
UpperCAmelCase__ : Optional[int] = decode_jitted(**_A ).to_tuple()
self.assertEqual(len(_A ) , len(_A ) )
for jitted_output, output in zip(_A , _A ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowercase_ ( self : List[str] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
UpperCAmelCase__ : Union[str, Any] = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' )
            # the model expects an eos token in input_ids
UpperCAmelCase__ : Tuple = np.ones((1, 1) ) * model.config.eos_token_id
UpperCAmelCase__ : Union[str, Any] = model(_A )
self.assertIsNotNone(_A )
@unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' )
@slow
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 15, '''max_length''': 25}
UpperCAmelCase__ : int = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True}
UpperCAmelCase__ : str = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=_A )
UpperCAmelCase__ : Optional[Any] = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' )
UpperCAmelCase__ : Optional[Any] = ['''Sam''']
UpperCAmelCase__ : Dict = tokenizer(_A , return_tensors='''jax''' )
UpperCAmelCase__ : List[str] = model.generate(**_A , **_A )
UpperCAmelCase__ : Dict = '''Sam is a great name. It means "sun" in Gaelic.'''
UpperCAmelCase__ : Any = tokenizer.batch_decode(_A , **_A )
assert generated_txt[0].strip() == tgt_text
| 75 | 1 |
'''simple docstring'''
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class lowerCamelCase_ ( nn.Module ):
def __init__( self : int ):
'''simple docstring'''
super().__init__()
UpperCAmelCase__ : str = nn.Linear(3 , 4 )
UpperCAmelCase__ : Optional[int] = nn.BatchNormad(4 )
UpperCAmelCase__ : Dict = nn.Linear(4 , 5 )
def lowercase_ ( self : str , _A : List[str] ):
'''simple docstring'''
return self.lineara(self.batchnorm(self.lineara(_A ) ) )
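# The tests below exercise accelerate's disk-offload utilities: each tensor is
# serialized to its own .dat file alongside an index.json, then reloaded lazily
# through OffloadedWeightsLoader.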
class lowerCamelCase_ ( unittest.TestCase ):
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = ModelForTest()
with TemporaryDirectory() as tmp_dir:
offload_state_dict(_A , model.state_dict() )
UpperCAmelCase__ : Dict = os.path.join(_A , '''index.json''' )
self.assertTrue(os.path.isfile(_A ) )
# TODO: add tests on what is inside the index
for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
UpperCAmelCase__ : Optional[int] = os.path.join(_A , f"""{key}.dat""" )
self.assertTrue(os.path.isfile(_A ) )
# TODO: add tests on the fact weights are properly loaded
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = [torch.floataa, torch.floataa, torch.bfloataa]
for dtype in dtypes:
UpperCAmelCase__ : List[str] = torch.randn(2 , 3 , dtype=_A )
with TemporaryDirectory() as tmp_dir:
UpperCAmelCase__ : List[str] = offload_weight(_A , '''weight''' , _A , {} )
UpperCAmelCase__ : Optional[int] = os.path.join(_A , '''weight.dat''' )
self.assertTrue(os.path.isfile(_A ) )
self.assertDictEqual(_A , {'''weight''': {'''shape''': [2, 3], '''dtype''': str(_A ).split('''.''' )[1]}} )
UpperCAmelCase__ : List[Any] = load_offloaded_weight(_A , index['''weight'''] )
self.assertTrue(torch.equal(_A , _A ) )
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = ModelForTest()
UpperCAmelCase__ : Optional[Any] = model.state_dict()
UpperCAmelCase__ : Optional[int] = {k: v for k, v in state_dict.items() if '''linear2''' not in k}
UpperCAmelCase__ : int = {k: v for k, v in state_dict.items() if '''linear2''' in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(_A , _A )
UpperCAmelCase__ : Dict = OffloadedWeightsLoader(state_dict=_A , save_folder=_A )
# Every key is there with the right value
self.assertEqual(sorted(_A ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(_A , weight_map[key] ) )
UpperCAmelCase__ : Optional[int] = {k: v for k, v in state_dict.items() if '''weight''' in k}
UpperCAmelCase__ : List[str] = {k: v for k, v in state_dict.items() if '''weight''' not in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(_A , _A )
UpperCAmelCase__ : str = OffloadedWeightsLoader(state_dict=_A , save_folder=_A )
# Every key is there with the right value
self.assertEqual(sorted(_A ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(_A , weight_map[key] ) )
with TemporaryDirectory() as tmp_dir:
offload_state_dict(_A , _A )
# Duplicates are removed
UpperCAmelCase__ : int = OffloadedWeightsLoader(state_dict=_A , save_folder=_A )
# Every key is there with the right value
self.assertEqual(sorted(_A ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(_A , weight_map[key] ) )
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = {'''a.1''': 0, '''a.10''': 1, '''a.2''': 2}
UpperCAmelCase__ : int = extract_submodules_state_dict(_A , ['''a.1''', '''a.2'''] )
self.assertDictEqual(_A , {'''a.1''': 0, '''a.2''': 2} )
UpperCAmelCase__ : Any = {'''a.1.a''': 0, '''a.10.a''': 1, '''a.2.a''': 2}
UpperCAmelCase__ : int = extract_submodules_state_dict(_A , ['''a.1''', '''a.2'''] )
self.assertDictEqual(_A , {'''a.1.a''': 0, '''a.2.a''': 2} )
| 75 |
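The offload tests above exercise a simple disk-offload pattern: each tensor is dumped to a flat `.dat` file plus an index entry recording its shape and dtype, and later memory-mapped back on demand. A minimal sketch of that pattern in plain numpy/torch; the function names here are illustrative, not the accelerate API:

```python
import numpy as np
import torch

def offload_tensor(tensor: torch.Tensor, path: str) -> dict:
    # Dump raw bytes to disk and keep the metadata needed to map them back.
    array = tensor.detach().cpu().numpy()
    array.tofile(path)
    return {"shape": list(array.shape), "dtype": str(array.dtype)}

def load_offloaded(path: str, meta: dict) -> np.memmap:
    # Memory-map the file so the data stays on disk until it is actually read.
    return np.memmap(path, dtype=meta["dtype"], mode="r", shape=tuple(meta["shape"]))

weight = torch.randn(2, 3)
meta = offload_tensor(weight, "/tmp/weight.dat")
restored = torch.from_numpy(np.array(load_offloaded("/tmp/weight.dat", meta)))
assert torch.equal(weight, restored)
```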
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class lowerCamelCase_ ( datasets.BeamBasedBuilder ):
def lowercase_ ( self : str ):
'''simple docstring'''
return datasets.DatasetInfo(
features=datasets.Features({'''content''': datasets.Value('''string''' )} ) , supervised_keys=_A , )
def lowercase_ ( self : int , _A : Optional[int] , _A : Optional[Any] ):
'''simple docstring'''
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_dummy_examples()} )]
def lowercase_ ( self : Union[str, Any] , _A : str , _A : Union[str, Any] ):
'''simple docstring'''
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(_A )
class lowerCamelCase_ ( datasets.BeamBasedBuilder ):
def lowercase_ ( self : Any ):
'''simple docstring'''
return datasets.DatasetInfo(
features=datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) , supervised_keys=_A , )
def lowercase_ ( self : Any , _A : List[str] , _A : Any ):
'''simple docstring'''
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_nested_examples()} )
]
def lowercase_ ( self : List[str] , _A : Optional[int] , _A : Tuple ):
'''simple docstring'''
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(_A )
def a__ ( ) -> Tuple:
return [(i, {"content": content}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )]
def a__ ( ) -> Optional[Any]:
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )]
class lowerCamelCase_ ( __a ):
@require_beam
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCAmelCase__ : Any = DummyBeamDataset(cache_dir=_A , beam_runner='''DirectRunner''' )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(_A , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train.arrow""" ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) )
UpperCAmelCase__ : Union[str, Any] = builder.as_dataset()
self.assertEqual(dset['''train'''].num_rows , _A )
self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , _A )
self.assertDictEqual(dset['''train'''][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset['''train'''][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(_A , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
del dset
@require_beam
def lowercase_ ( self : Any ):
'''simple docstring'''
import apache_beam as beam
UpperCAmelCase__ : List[str] = beam.io.parquetio.WriteToParquet
UpperCAmelCase__ : int = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCAmelCase__ : Optional[int] = DummyBeamDataset(cache_dir=_A , beam_runner='''DirectRunner''' )
with patch('''apache_beam.io.parquetio.WriteToParquet''' ) as write_parquet_mock:
UpperCAmelCase__ : Dict = partial(_A , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
_A , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
_A , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) )
UpperCAmelCase__ : Tuple = builder.as_dataset()
self.assertEqual(dset['''train'''].num_rows , _A )
self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , _A )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset['''train''']['''content'''] ) , sorted(['''foo''', '''bar''', '''foobar'''] ) )
self.assertTrue(
os.path.exists(os.path.join(_A , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
del dset
@require_beam
def lowercase_ ( self : int ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCAmelCase__ : Tuple = DummyBeamDataset(cache_dir=_A )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCAmelCase__ : int = NestedBeamDataset(cache_dir=_A , beam_runner='''DirectRunner''' )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(_A , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train.arrow""" ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) )
UpperCAmelCase__ : Optional[int] = builder.as_dataset()
self.assertEqual(dset['''train'''].num_rows , _A )
self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , _A )
self.assertDictEqual(dset['''train'''][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset['''train'''][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(_A , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
del dset
| 75 | 1 |
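For reference, the `pipeline | "Load Examples" >> beam.Create(...)` expression used by the builders above is ordinary Apache Beam composition; each `>>` attaches a labeled transform to the pipeline. A self-contained sketch with the DirectRunner, assuming `apache-beam` is installed:

```python
import apache_beam as beam

with beam.Pipeline(runner="DirectRunner") as pipeline:
    (
        pipeline
        | "Load Examples" >> beam.Create([(0, {"content": "foo"}), (1, {"content": "bar"})])
        | "Drop keys" >> beam.Map(lambda kv: kv[1])
        | "Print" >> beam.Map(print)
    )
```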
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
UpperCamelCase__ = '''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'''
def a__ ( ) -> List[str]:
UpperCAmelCase__ : Optional[int] = _ask_options(
'''In which compute environment are you running?''' , ['''This machine''', '''AWS (Amazon SageMaker)'''] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
UpperCAmelCase__ : Any = get_sagemaker_input()
else:
UpperCAmelCase__ : List[str] = get_cluster_input()
return config
def a__ ( lowerCAmelCase__=None ) -> List[Any]:
if subparsers is not None:
UpperCAmelCase__ : Union[str, Any] = subparsers.add_parser('''config''' , description=lowerCAmelCase__ )
else:
UpperCAmelCase__ : Dict = argparse.ArgumentParser('''Accelerate config command''' , description=lowerCAmelCase__ )
parser.add_argument(
'''--config_file''' , default=lowerCAmelCase__ , help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) , )
if subparsers is not None:
parser.set_defaults(func=lowerCAmelCase__ )
return parser
def a__ ( lowerCAmelCase__ ) -> List[Any]:
UpperCAmelCase__ : List[Any] = get_user_input()
if args.config_file is not None:
UpperCAmelCase__ : Any = args.config_file
else:
if not os.path.isdir(lowerCAmelCase__ ):
os.makedirs(lowerCAmelCase__ )
UpperCAmelCase__ : int = default_yaml_config_file
if config_file.endswith('''.json''' ):
config.to_json_file(lowerCAmelCase__ )
else:
config.to_yaml_file(lowerCAmelCase__ )
print(F"""accelerate configuration saved at {config_file}""" )
def a__ ( ) -> str:
UpperCAmelCase__ : Optional[int] = config_command_parser()
UpperCAmelCase__ : Any = parser.parse_args()
config_command(lowerCAmelCase__ )
if __name__ == "__main__":
main()
| 75 | 1 |
'''simple docstring'''
from collections import namedtuple
import requests
from lxml import html # type: ignore
UpperCamelCase__ = namedtuple('''covid_data''', '''cases deaths recovered''')
def a__ ( lowerCAmelCase__ = "https://www.worldometers.info/coronavirus/" ) -> covid_data:
UpperCAmelCase__ : Union[str, Any] = '''//div[@class = "maincounter-number"]/span/text()'''
return covid_data(*html.fromstring(requests.get(lowerCAmelCase__ ).content ).xpath(lowerCAmelCase__ ) )
UpperCamelCase__ = '''Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}'''
print(fmt.format(*covid_stats()))
| 75 |
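The scraper above hinges on a single XPath query. The same query can be sanity-checked against a literal HTML fragment without touching the network; the markup below is a made-up stand-in for the real page:

```python
from lxml import html

snippet = """
<div><div class="maincounter-number"><span>1,234,567</span></div>
<div class="maincounter-number"><span>89,012</span></div>
<div class="maincounter-number"><span>45,678</span></div></div>
"""
doc = html.fromstring(snippet)
print(doc.xpath('//div[@class = "maincounter-number"]/span/text()'))
# ['1,234,567', '89,012', '45,678']
```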
'''simple docstring'''
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int:
# Construct model
if gpta_config_file == "":
UpperCAmelCase__ : Optional[Any] = GPTaConfig()
else:
UpperCAmelCase__ : Tuple = GPTaConfig.from_json_file(lowerCAmelCase__ )
UpperCAmelCase__ : Dict = GPTaModel(lowerCAmelCase__ )
# Load weights from numpy
load_tf_weights_in_gpta(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save pytorch-model
UpperCAmelCase__ : Union[str, Any] = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
UpperCAmelCase__ : Optional[Any] = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
print(F"""Save PyTorch model to {pytorch_weights_dump_path}""" )
torch.save(model.state_dict() , lowerCAmelCase__ )
print(F"""Save configuration file to {pytorch_config_dump_path}""" )
with open(lowerCAmelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--gpt2_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--gpt2_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
'''This specifies the model architecture.'''
),
)
UpperCamelCase__ = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
| 75 | 1 |
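The save step of the converter reduces to two artifacts: a binary `state_dict` and a JSON config. A stripped-down sketch of that pattern; the file names are hard-coded here for illustration rather than imported from `transformers.utils`:

```python
import json
import os
import torch

def save_pytorch_dump(model: torch.nn.Module, config_dict: dict, dump_dir: str) -> None:
    os.makedirs(dump_dir, exist_ok=True)
    # Weights go into a single binary file...
    torch.save(model.state_dict(), os.path.join(dump_dir, "pytorch_model.bin"))
    # ...and the hyperparameters into a human-readable config.
    with open(os.path.join(dump_dir, "config.json"), "w", encoding="utf-8") as f:
        json.dump(config_dict, f, indent=2)

save_pytorch_dump(torch.nn.Linear(4, 2), {"in_features": 4, "out_features": 2}, "/tmp/dump")
```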
'''simple docstring'''
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ = 0 ) -> list:
UpperCAmelCase__ : Optional[int] = length or len(lowerCAmelCase__ )
UpperCAmelCase__ : Dict = False
for i in range(length - 1 ):
if list_data[i] > list_data[i + 1]:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = list_data[i + 1], list_data[i]
UpperCAmelCase__ : Optional[int] = True
return list_data if not swapped else bubble_sort(lowerCAmelCase__ , length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 75 |
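The bubble sort above recurses once per pass, which costs O(n) Python stack depth on unsorted input. An equivalent iterative version with the same early exit when a pass makes no swaps:

```python
def bubble_sort_iterative(list_data: list) -> list:
    for length in range(len(list_data), 1, -1):
        swapped = False
        for i in range(length - 1):
            if list_data[i] > list_data[i + 1]:
                list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
                swapped = True
        if not swapped:  # no swaps means the list is already sorted
            break
    return list_data

assert bubble_sort_iterative([5, 1, 4, 2, 8]) == [1, 2, 4, 5, 8]
```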
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCamelCase_ :
def __init__( self : Optional[int] , _A : Optional[Any] , _A : Tuple=2 , _A : Tuple=3 , _A : Optional[Any]=4 , _A : List[Any]=2 , _A : List[Any]=7 , _A : int=True , _A : Dict=True , _A : int=True , _A : Dict=True , _A : Tuple=99 , _A : Union[str, Any]=36 , _A : int=2 , _A : List[str]=4 , _A : int=37 , _A : List[Any]="gelu" , _A : str=0.1 , _A : str=0.1 , _A : Tuple=512 , _A : Dict=16 , _A : Tuple=2 , _A : Union[str, Any]=0.0_2 , _A : Any=6 , _A : Union[str, Any]=6 , _A : str=3 , _A : str=4 , _A : Tuple=None , _A : int=1_000 , ):
'''simple docstring'''
UpperCAmelCase__ : int = parent
UpperCAmelCase__ : Optional[int] = batch_size
UpperCAmelCase__ : str = num_channels
UpperCAmelCase__ : str = image_size
UpperCAmelCase__ : List[str] = patch_size
UpperCAmelCase__ : Any = is_training
UpperCAmelCase__ : List[str] = use_input_mask
UpperCAmelCase__ : Tuple = use_token_type_ids
UpperCAmelCase__ : str = use_labels
UpperCAmelCase__ : int = vocab_size
UpperCAmelCase__ : List[Any] = hidden_size
UpperCAmelCase__ : Optional[int] = num_hidden_layers
UpperCAmelCase__ : List[str] = num_attention_heads
UpperCAmelCase__ : Tuple = intermediate_size
UpperCAmelCase__ : Dict = hidden_act
UpperCAmelCase__ : int = hidden_dropout_prob
UpperCAmelCase__ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase__ : List[str] = max_position_embeddings
UpperCAmelCase__ : Tuple = type_vocab_size
UpperCAmelCase__ : Any = type_sequence_label_size
UpperCAmelCase__ : List[str] = initializer_range
UpperCAmelCase__ : List[str] = coordinate_size
UpperCAmelCase__ : Tuple = shape_size
UpperCAmelCase__ : Optional[int] = num_labels
UpperCAmelCase__ : Optional[Any] = num_choices
UpperCAmelCase__ : Union[str, Any] = scope
UpperCAmelCase__ : Optional[Any] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
UpperCAmelCase__ : str = text_seq_length
UpperCAmelCase__ : Tuple = (image_size // patch_size) ** 2 + 1
UpperCAmelCase__ : Tuple = self.text_seq_length + self.image_seq_length
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
UpperCAmelCase__ : int = bbox.numpy()
# Ensure that each bbox is valid, i.e. x0 <= x1 and y0 <= y1
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
UpperCAmelCase__ : str = bbox[i, j, 3]
UpperCAmelCase__ : Dict = bbox[i, j, 1]
UpperCAmelCase__ : str = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
UpperCAmelCase__ : Optional[int] = bbox[i, j, 2]
UpperCAmelCase__ : Any = bbox[i, j, 0]
UpperCAmelCase__ : List[Any] = tmp_coordinate
UpperCAmelCase__ : str = tf.constant(_A )
UpperCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : Any = None
if self.use_input_mask:
UpperCAmelCase__ : Any = random_attention_mask([self.batch_size, self.text_seq_length] )
UpperCAmelCase__ : Any = None
if self.use_token_type_ids:
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
UpperCAmelCase__ : Optional[int] = None
UpperCAmelCase__ : List[str] = None
if self.use_labels:
UpperCAmelCase__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
UpperCAmelCase__ : Optional[int] = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def lowercase_ ( self : Union[str, Any] , _A : int , _A : str , _A : Optional[int] , _A : Optional[int] , _A : List[str] , _A : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = TFLayoutLMvaModel(config=_A )
# text + image
UpperCAmelCase__ : Tuple = model(_A , pixel_values=_A , training=_A )
UpperCAmelCase__ : Tuple = model(
_A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , training=_A , )
UpperCAmelCase__ : Optional[Any] = model(_A , bbox=_A , pixel_values=_A , training=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
UpperCAmelCase__ : Any = model(_A , training=_A )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
UpperCAmelCase__ : str = model({'''pixel_values''': pixel_values} , training=_A )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def lowercase_ ( self : Union[str, Any] , _A : Optional[int] , _A : Optional[Any] , _A : Dict , _A : List[Any] , _A : List[Any] , _A : Any , _A : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.num_labels
UpperCAmelCase__ : int = TFLayoutLMvaForSequenceClassification(config=_A )
UpperCAmelCase__ : Union[str, Any] = model(
_A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , labels=_A , training=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ ( self : Dict , _A : List[Any] , _A : Any , _A : Dict , _A : str , _A : Optional[int] , _A : str , _A : str ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.num_labels
UpperCAmelCase__ : Union[str, Any] = TFLayoutLMvaForTokenClassification(config=_A )
UpperCAmelCase__ : Optional[int] = model(
_A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , labels=_A , training=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def lowercase_ ( self : Dict , _A : Dict , _A : List[str] , _A : Union[str, Any] , _A : int , _A : Tuple , _A : Dict , _A : str ):
'''simple docstring'''
UpperCAmelCase__ : str = 2
UpperCAmelCase__ : Dict = TFLayoutLMvaForQuestionAnswering(config=_A )
UpperCAmelCase__ : str = model(
_A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , training=_A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : int = self.prepare_config_and_inputs()
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : List[str] = config_and_inputs
UpperCAmelCase__ : List[Any] = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''pixel_values''': pixel_values,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_tf
class lowerCamelCase_ ( __a , __a , unittest.TestCase ):
lowerCAmelCase__ = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase__ = (
{'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def lowercase_ ( self : List[Any] , _A : Union[str, Any] , _A : str , _A : List[Any] , _A : Dict , _A : List[str] ):
'''simple docstring'''
return True
def lowercase_ ( self : Optional[Any] , _A : Tuple , _A : Any , _A : Dict=False ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = copy.deepcopy(_A )
if model_class in get_values(_A ):
UpperCAmelCase__ : Tuple = {
k: tf.tile(tf.expand_dims(_A , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(_A , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(_A ):
UpperCAmelCase__ : Dict = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(_A ):
UpperCAmelCase__ : Tuple = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
UpperCAmelCase__ : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(_A ):
UpperCAmelCase__ : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(_A ):
UpperCAmelCase__ : int = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Any = TFLayoutLMvaModelTester(self )
UpperCAmelCase__ : Tuple = ConfigTester(self , config_class=_A , hidden_size=37 )
def lowercase_ ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Optional[Any] = model_class(_A )
if getattr(_A , '''hf_compute_loss''' , _A ):
# The number of elements in the loss should be the same as the number of elements in the label
UpperCAmelCase__ : Tuple = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
UpperCAmelCase__ : List[Any] = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=_A )[0]
]
UpperCAmelCase__ : Optional[Any] = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
UpperCAmelCase__ : Any = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
UpperCAmelCase__ : Tuple = prepared_for_class.pop('''input_ids''' )
UpperCAmelCase__ : List[Any] = model(_A , **_A )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
UpperCAmelCase__ : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
UpperCAmelCase__ : Tuple = prepared_for_class.pop('''input_ids''' )
if "labels" in prepared_for_class:
UpperCAmelCase__ : Optional[Any] = prepared_for_class['''labels'''].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
UpperCAmelCase__ : Any = -100
UpperCAmelCase__ : Union[str, Any] = tf.convert_to_tensor(_A )
UpperCAmelCase__ : int = model(_A , **_A )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
UpperCAmelCase__ : Optional[int] = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
UpperCAmelCase__ : Dict = model(_A )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
UpperCAmelCase__ : Dict = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
# Get keys that were added with the _prepare_for_class function
UpperCAmelCase__ : Optional[int] = prepared_for_class.keys() - inputs_dict.keys()
UpperCAmelCase__ : int = inspect.signature(model.call ).parameters
UpperCAmelCase__ : Union[str, Any] = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
UpperCAmelCase__ : Dict = {0: '''input_ids'''}
for label_key in label_keys:
UpperCAmelCase__ : str = signature_names.index(_A )
UpperCAmelCase__ : List[Any] = label_key
UpperCAmelCase__ : Dict = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
UpperCAmelCase__ : Tuple = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
UpperCAmelCase__ : Any = prepared_for_class[value]
UpperCAmelCase__ : Tuple = tuple(_A )
# Send to model
UpperCAmelCase__ : Optional[Any] = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def lowercase_ ( self : int ):
'''simple docstring'''
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(_A , _A , _A , _A , _A , _A )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase__ : Union[str, Any] = type
self.model_tester.create_and_check_model(_A , _A , _A , _A , _A , _A )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
_A , _A , _A , _A , _A , _A , _A )
def lowercase_ ( self : Any ):
'''simple docstring'''
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
_A , _A , _A , _A , _A , _A , _A )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
_A , _A , _A , _A , _A , _A , _A )
@slow
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : List[str] = TFLayoutLMvaModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def a__ ( ) -> List[str]:
UpperCAmelCase__ : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
class lowerCamelCase_ ( unittest.TestCase ):
@cached_property
def lowercase_ ( self : Dict ):
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=_A ) if is_vision_available() else None
@slow
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : str = TFLayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' )
UpperCAmelCase__ : Dict = self.default_image_processor
UpperCAmelCase__ : Any = prepare_img()
UpperCAmelCase__ : int = image_processor(images=_A , return_tensors='''tf''' ).pixel_values
UpperCAmelCase__ : str = tf.constant([[1, 2]] )
UpperCAmelCase__ : Optional[Any] = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
UpperCAmelCase__ : int = model(input_ids=_A , bbox=_A , pixel_values=_A , training=_A )
# verify the logits
UpperCAmelCase__ : Optional[int] = (1, 199, 768)
self.assertEqual(outputs.last_hidden_state.shape , _A )
UpperCAmelCase__ : Dict = tf.constant(
[[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _A , atol=1e-4 ) )
| 75 | 1 |
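The bbox-legalization loop in the tester above swaps coordinates so that every box satisfies x0 <= x1 and y0 <= y1. The same repair can be done in one vectorized step; a numpy sketch assuming boxes are laid out as (x0, y0, x1, y1) on the last axis:

```python
import numpy as np

def sort_box_corners(bbox: np.ndarray) -> np.ndarray:
    # Pair (x0, x1) and (y0, y1) elementwise so the top-left corner
    # always precedes the bottom-right corner.
    lo = np.minimum(bbox[..., :2], bbox[..., 2:])
    hi = np.maximum(bbox[..., :2], bbox[..., 2:])
    return np.concatenate([lo, hi], axis=-1)

boxes = np.array([[30, 40, 10, 20], [1, 2, 3, 4]])
print(sort_box_corners(boxes))  # [[10 20 30 40], [ 1  2  3  4]]
```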
'''simple docstring'''
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]:
UpperCAmelCase__ : List[Any] = ('''dense.weight''', '''attention.self.query''', '''attention.self.key''', '''attention.self.value''')
UpperCAmelCase__ : Dict = (
('''layer.''', '''layer_'''),
('''word_embeddings.weight''', '''word_embeddings'''),
('''position_embeddings.weight''', '''position_embeddings'''),
('''token_type_embeddings.weight''', '''token_type_embeddings'''),
('''.''', '''/'''),
('''LayerNorm/weight''', '''LayerNorm/gamma'''),
('''LayerNorm/bias''', '''LayerNorm/beta'''),
('''weight''', '''kernel'''),
)
if not os.path.isdir(lowerCAmelCase__ ):
os.makedirs(lowerCAmelCase__ )
UpperCAmelCase__ : int = model.state_dict()
def to_tf_var_name(lowerCAmelCase__ ):
for patt, repl in iter(lowerCAmelCase__ ):
UpperCAmelCase__ : str = name.replace(lowerCAmelCase__ , lowerCAmelCase__ )
return F"""bert/{name}"""
def create_tf_var(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase__ : Union[str, Any] = tf.dtypes.as_dtype(tensor.dtype )
UpperCAmelCase__ : Optional[int] = tf.get_variable(dtype=lowerCAmelCase__ , shape=tensor.shape , name=lowerCAmelCase__ , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(lowerCAmelCase__ )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
UpperCAmelCase__ : Union[str, Any] = to_tf_var_name(lowerCAmelCase__ )
UpperCAmelCase__ : Union[str, Any] = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
UpperCAmelCase__ : List[Any] = torch_tensor.T
UpperCAmelCase__ : Optional[Any] = create_tf_var(tensor=lowerCAmelCase__ , name=lowerCAmelCase__ , session=lowerCAmelCase__ )
tf.keras.backend.set_value(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase__ : Dict = session.run(lowerCAmelCase__ )
print(F"""Successfully created {tf_name}: {np.allclose(lowerCAmelCase__ , lowerCAmelCase__ )}""" )
UpperCAmelCase__ : Optional[int] = tf.train.Saver(tf.trainable_variables() )
saver.save(lowerCAmelCase__ , os.path.join(lowerCAmelCase__ , model_name.replace('''-''' , '''_''' ) + '''.ckpt''' ) )
def a__ ( lowerCAmelCase__=None ) -> str:
UpperCAmelCase__ : List[Any] = argparse.ArgumentParser()
parser.add_argument('''--model_name''' , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help='''model name e.g. bert-base-uncased''' )
parser.add_argument(
'''--cache_dir''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , required=lowerCAmelCase__ , help='''Directory containing pytorch model''' )
parser.add_argument('''--pytorch_model_path''' , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help='''/path/to/<pytorch-model-name>.bin''' )
parser.add_argument('''--tf_cache_dir''' , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help='''Directory in which to save tensorflow model''' )
UpperCAmelCase__ : str = parser.parse_args(lowerCAmelCase__ )
UpperCAmelCase__ : int = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=lowerCAmelCase__ , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
| 75 |
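The `tensors_to_transpose` list in the converter above exists because PyTorch `nn.Linear` stores its weight as (out_features, in_features) while a TF Dense kernel has shape (in_features, out_features). A small numerical check of that correspondence:

```python
import numpy as np
import torch

linear = torch.nn.Linear(3, 4)
x = torch.randn(1, 3)

# A TF-style dense layer computes x @ kernel + bias with a (3, 4) kernel,
# i.e. the transpose of the (4, 3) PyTorch weight.
kernel = linear.weight.detach().numpy().T
y_tf_style = x.numpy() @ kernel + linear.bias.detach().numpy()
assert np.allclose(y_tf_style, linear(x).detach().numpy(), atol=1e-6)
```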
'''simple docstring'''
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
UpperCamelCase__ = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: ''')))
print('''Googling.....''')
UpperCamelCase__ = F"""https://www.google.com/search?q={query}&num=100"""
UpperCamelCase__ = requests.get(
url,
headers={'''User-Agent''': str(UserAgent().random)},
)
try:
UpperCamelCase__ = (
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''yuRUbf'''})
.find('''a''')
.get('''href''')
)
except AttributeError:
UpperCamelCase__ = parse_qs(
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''kCrYT'''})
.find('''a''')
.get('''href''')
)['''url'''][0]
webbrowser.open(link)
| 75 | 1 |
'''simple docstring'''
class lowerCamelCase_ :
def __init__( self : str , _A : Union[str, Any] , _A : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = name
UpperCAmelCase__ : Union[str, Any] = val
def __str__( self : Tuple ):
'''simple docstring'''
return f"""{self.__class__.__name__}({self.name}, {self.val})"""
def __lt__( self : Union[str, Any] , _A : Dict ):
'''simple docstring'''
return self.val < other.val
class lowerCamelCase_ :
def __init__( self : int , _A : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = {}
UpperCAmelCase__ : int = {}
UpperCAmelCase__ : Any = self.build_heap(_A )
def __getitem__( self : Any , _A : Any ):
'''simple docstring'''
return self.get_value(_A )
def lowercase_ ( self : Any , _A : List[Any] ):
'''simple docstring'''
return (idx - 1) // 2
def lowercase_ ( self : Union[str, Any] , _A : Optional[int] ):
'''simple docstring'''
return idx * 2 + 1
def lowercase_ ( self : Tuple , _A : List[Any] ):
'''simple docstring'''
return idx * 2 + 2
def lowercase_ ( self : List[str] , _A : Tuple ):
'''simple docstring'''
return self.heap_dict[key]
def lowercase_ ( self : str , _A : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Any = len(_A ) - 1
UpperCAmelCase__ : Tuple = self.get_parent_idx(_A )
for idx, i in enumerate(_A ):
UpperCAmelCase__ : Dict = idx
UpperCAmelCase__ : Optional[Any] = i.val
for i in range(_A , -1 , -1 ):
self.sift_down(_A , _A )
return array
def lowercase_ ( self : Optional[Any] , _A : str , _A : List[Any] ):
'''simple docstring'''
while True:
UpperCAmelCase__ : Any = self.get_left_child_idx(_A ) # noqa: E741
UpperCAmelCase__ : Optional[Any] = self.get_right_child_idx(_A )
UpperCAmelCase__ : Tuple = idx
if l < len(_A ) and array[l] < array[idx]:
UpperCAmelCase__ : int = l
if r < len(_A ) and array[r] < array[smallest]:
UpperCAmelCase__ : Dict = r
if smallest != idx:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = array[smallest], array[idx]
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = (
self.idx_of_element[array[smallest]],
self.idx_of_element[array[idx]],
)
UpperCAmelCase__ : str = smallest
else:
break
def lowercase_ ( self : List[str] , _A : int ):
'''simple docstring'''
UpperCAmelCase__ : str = self.get_parent_idx(_A )
while p >= 0 and self.heap[p] > self.heap[idx]:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.heap[idx], self.heap[p]
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = (
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
UpperCAmelCase__ : Union[str, Any] = p
UpperCAmelCase__ : List[Any] = self.get_parent_idx(_A )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
return self.heap[0]
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Any = self.heap[-1], self.heap[0]
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = (
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
UpperCAmelCase__ : int = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0 , self.heap )
return x
def lowercase_ ( self : int , _A : Union[str, Any] ):
'''simple docstring'''
self.heap.append(_A )
UpperCAmelCase__ : Union[str, Any] = len(self.heap ) - 1
UpperCAmelCase__ : Optional[Any] = node.val
self.sift_up(len(self.heap ) - 1 )
def lowercase_ ( self : str ):
'''simple docstring'''
return len(self.heap ) == 0
def lowercase_ ( self : int , _A : Optional[Any] , _A : str ):
'''simple docstring'''
assert (
self.heap[self.idx_of_element[node]].val > new_value
), "newValue must be less that current value"
UpperCAmelCase__ : Optional[Any] = new_value
UpperCAmelCase__ : List[str] = new_value
self.sift_up(self.idx_of_element[node] )
UpperCamelCase__ = Node('''R''', -1)
UpperCamelCase__ = Node('''B''', 6)
UpperCamelCase__ = Node('''A''', 3)
UpperCamelCase__ = Node('''X''', 1)
UpperCamelCase__ = Node('''E''', 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
UpperCamelCase__ = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('''Min Heap - before decrease key''')
for i in my_min_heap.heap:
print(i)
print('''Min Heap - After decrease key of node [B -> -17]''')
my_min_heap.decrease_key(b, -1_7)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 75 |
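The hand-rolled heap above tracks element positions precisely so that decrease-key can sift entries in place. The standard-library `heapq` exposes no decrease-key, so the usual workaround is lazy deletion: mark the stale entry invalid and push a fresh one. A sketch:

```python
import heapq

heap, live = [], {}

def push(name, val):
    entry = [val, name, True]  # third slot is a mutable "still valid" flag
    live[name] = entry
    heapq.heappush(heap, entry)

def decrease_key(name, new_val):
    live[name][2] = False  # invalidate the old entry instead of moving it
    push(name, new_val)

def pop_min():
    while heap:  # skip over invalidated entries
        val, name, valid = heapq.heappop(heap)
        if valid:
            del live[name]
            return name, val
    raise IndexError("pop from empty heap")

push("B", 6)
push("A", 3)
decrease_key("B", -17)
print(pop_min())  # ('B', -17)
```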
'''simple docstring'''
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> np.ndarray:
UpperCAmelCase__ : List[str] = cva.getAffineTransform(lowerCAmelCase__ , lowerCAmelCase__ )
return cva.warpAffine(lowerCAmelCase__ , lowerCAmelCase__ , (rows, cols) )
if __name__ == "__main__":
# read original image
UpperCamelCase__ = cva.imread(
str(Path(__file__).resolve().parent.parent / '''image_data''' / '''lena.jpg''')
)
# turn image in gray scale value
UpperCamelCase__ = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
# get image shape
UpperCamelCase__ , UpperCamelCase__ = gray_img.shape
# set different points to rotate image
UpperCamelCase__ = np.array([[5_0, 5_0], [2_0_0, 5_0], [5_0, 2_0_0]], np.floataa)
UpperCamelCase__ = np.array([[1_0, 1_0_0], [2_0_0, 5_0], [1_0_0, 2_5_0]], np.floataa)
UpperCamelCase__ = np.array([[5_0, 5_0], [1_5_0, 5_0], [1_2_0, 2_0_0]], np.floataa)
UpperCamelCase__ = np.array([[1_0, 1_0_0], [8_0, 5_0], [1_8_0, 2_5_0]], np.floataa)
# add all rotated images in a list
UpperCamelCase__ = [
gray_img,
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
]
# plot different image rotations
UpperCamelCase__ = plt.figure(1)
UpperCamelCase__ = ['''Original''', '''Rotation 1''', '''Rotation 2''', '''Rotation 3''']
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, '''gray''')
plt.title(titles[i])
plt.axis('''off''')
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
| 75 | 1 |
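`cva.getAffineTransform` solves a small linear system: the three source points, written in homogeneous coordinates, are mapped exactly onto the three destination points. The same 2x3 matrix can be recovered with plain numpy, provided the source points are not collinear:

```python
import numpy as np

def affine_from_points(src, dst) -> np.ndarray:
    # Solve A @ M.T = dst, where A stacks [x, y, 1] rows for the 3 source points.
    A = np.hstack([np.asarray(src, np.float64), np.ones((3, 1))])
    return np.linalg.solve(A, np.asarray(dst, np.float64)).T  # shape (2, 3)

src = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
dst = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
print(affine_from_points(src, dst))
```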
'''simple docstring'''
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]:
UpperCAmelCase__ : Dict = args.log_outputs
UpperCAmelCase__ : int = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] )
# load metric
UpperCAmelCase__ : int = load_metric('''wer''' )
UpperCAmelCase__ : Optional[int] = load_metric('''cer''' )
# compute metrics
UpperCAmelCase__ : int = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
UpperCAmelCase__ : Any = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
# print & log results
UpperCAmelCase__ : int = F"""WER: {wer_result}\nCER: {cer_result}"""
print(lowerCAmelCase__ )
with open(F"""{dataset_id}_eval_results.txt""" , '''w''' ) as f:
f.write(lowerCAmelCase__ )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
UpperCAmelCase__ : Tuple = F"""log_{dataset_id}_predictions.txt"""
UpperCAmelCase__ : Dict = F"""log_{dataset_id}_targets.txt"""
with open(lowerCAmelCase__ , '''w''' ) as p, open(lowerCAmelCase__ , '''w''' ) as t:
# mapping function to write output
def write_to_file(lowerCAmelCase__ , lowerCAmelCase__ ):
p.write(F"""{i}""" + '''\n''' )
p.write(batch['''prediction'''] + '''\n''' )
t.write(F"""{i}""" + '''\n''' )
t.write(batch['''target'''] + '''\n''' )
result.map(lowerCAmelCase__ , with_indices=lowerCAmelCase__ )
def a__ ( lowerCAmelCase__ ) -> str:
UpperCAmelCase__ : Optional[Any] = '''[,?.!\-\;\:"“%‘”�—’…–]''' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
UpperCAmelCase__ : List[Any] = re.sub(lowerCAmelCase__ , '''''' , text.lower() )
# In addition, we can normalize the target text, e.g. removing newline characters etc...
# note that order is important here!
UpperCAmelCase__ : Any = ['''\n\n''', '''\n''', ''' ''', ''' ''']
for t in token_sequences_to_ignore:
UpperCAmelCase__ : Optional[Any] = ''' '''.join(text.split(lowerCAmelCase__ ) )
return text
def a__ ( lowerCAmelCase__ ) -> str:
# load dataset
UpperCAmelCase__ : Any = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=lowerCAmelCase__ )
# for testing: only process the first two examples as a test
# dataset = dataset.select(range(10))
# load processor
UpperCAmelCase__ : Union[str, Any] = AutoFeatureExtractor.from_pretrained(args.model_id )
UpperCAmelCase__ : Any = feature_extractor.sampling_rate
# resample audio
UpperCAmelCase__ : List[str] = dataset.cast_column('''audio''' , Audio(sampling_rate=lowerCAmelCase__ ) )
# load eval pipeline
if args.device is None:
UpperCAmelCase__ : Any = 0 if torch.cuda.is_available() else -1
UpperCAmelCase__ : int = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(lowerCAmelCase__ ):
UpperCAmelCase__ : Optional[int] = asr(
batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
UpperCAmelCase__ : List[Any] = prediction['''text''']
UpperCAmelCase__ : Union[str, Any] = normalize_text(batch['''sentence'''] )
return batch
# run inference on all examples
UpperCAmelCase__ : Optional[int] = dataset.map(lowerCAmelCase__ , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(lowerCAmelCase__ , lowerCAmelCase__ )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
'''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers'''
)
parser.add_argument(
'''--dataset''',
type=str,
required=True,
help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''',
)
parser.add_argument(
'''--config''', type=str, required=True, help='''Config of the dataset. *E.g.* `\'en\'` for Common Voice'''
)
parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. *E.g.* `\'test\'`''')
parser.add_argument(
'''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to 5 seconds.'''
)
parser.add_argument(
'''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks. Defaults to 1 second.'''
)
parser.add_argument(
'''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.'''
)
parser.add_argument(
'''--device''',
type=int,
default=None,
help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''',
)
UpperCamelCase__ = parser.parse_args()
main(args)
| 75 |
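`load_metric('wer')` ultimately computes a word-level Levenshtein distance divided by the number of reference words. A compact reference implementation, handy for sanity-checking a single prediction/target pair:

```python
def word_error_rate(reference: str, hypothesis: str) -> float:
    ref, hyp = reference.split(), hypothesis.split()
    # Classic dynamic-programming edit distance over words.
    d = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
    for i in range(len(ref) + 1):
        d[i][0] = i
    for j in range(len(hyp) + 1):
        d[0][j] = j
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            sub = d[i - 1][j - 1] + (ref[i - 1] != hyp[j - 1])
            d[i][j] = min(d[i - 1][j] + 1, d[i][j - 1] + 1, sub)
    return d[-1][-1] / max(len(ref), 1)

print(word_error_rate("the cat sat", "the cat sat down"))  # 0.333...
```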
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
UpperCamelCase__ = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''feature request''',
'''new model''',
'''wip''',
]
def a__ ( ) -> List[str]:
UpperCAmelCase__ : int = Github(os.environ['''GITHUB_TOKEN'''] )
UpperCAmelCase__ : List[Any] = g.get_repo('''huggingface/transformers''' )
UpperCAmelCase__ : List[str] = repo.get_issues(state='''open''' )
for issue in open_issues:
UpperCAmelCase__ : List[Any] = sorted([comment for comment in issue.get_comments()] , key=lambda lowerCAmelCase__ : i.created_at , reverse=lowerCAmelCase__ )
UpperCAmelCase__ : Tuple = comments[0] if len(lowerCAmelCase__ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='''closed''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
| 75 | 1 |
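The staleness conditions above are easiest to get right when factored into a pure function that can be unit-tested without a GitHub token. A sketch with the thresholds copied from the script; the helper name is made up:

```python
from datetime import datetime, timedelta

def is_stale(updated_at, created_at, label_names, exempt, now=None):
    now = now or datetime.utcnow()
    return (
        (now - updated_at).days > 23
        and (now - created_at).days >= 30
        and not any(name.lower() in exempt for name in label_names)
    )

exempt = {"good first issue", "wip"}
month_old = datetime.utcnow() - timedelta(days=60)
quiet = datetime.utcnow() - timedelta(days=30)
print(is_stale(quiet, month_old, ["bug"], exempt))  # True
print(is_stale(quiet, month_old, ["WIP"], exempt))  # False, exempt label
```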
'''simple docstring'''
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
UpperCamelCase__ = logging.get_logger(__name__)
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
UpperCAmelCase__ : str = set()
UpperCAmelCase__ : List[str] = []
def parse_line(lowerCAmelCase__ ):
for line in fp:
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase__ : Union[str, Any] = line.decode('''UTF-8''' )
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(''' ''' ):
# process a single warning and move it to `selected_warnings`.
if len(lowerCAmelCase__ ) > 0:
UpperCAmelCase__ : List[Any] = '''\n'''.join(lowerCAmelCase__ )
# Only keep the warnings specified in `targets`
if any(F""": {x}: """ in warning for x in targets ):
selected_warnings.add(lowerCAmelCase__ )
buffer.clear()
continue
else:
UpperCAmelCase__ : Union[str, Any] = line.strip()
buffer.append(lowerCAmelCase__ )
if from_gh:
for filename in os.listdir(lowerCAmelCase__ ):
UpperCAmelCase__ : List[Any] = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ )
if not os.path.isdir(lowerCAmelCase__ ):
# read the file
if filename != "warnings.txt":
continue
with open(lowerCAmelCase__ ) as fp:
parse_line(lowerCAmelCase__ )
else:
try:
with zipfile.ZipFile(lowerCAmelCase__ ) as z:
for filename in z.namelist():
if not os.path.isdir(lowerCAmelCase__ ):
# read the file
if filename != "warnings.txt":
continue
with z.open(lowerCAmelCase__ ) as fp:
parse_line(lowerCAmelCase__ )
except Exception:
logger.warning(
F"""{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.""" )
return selected_warnings
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> Any:
UpperCAmelCase__ : Tuple = set()
UpperCAmelCase__ : Optional[int] = [os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) for p in os.listdir(lowerCAmelCase__ ) if (p.endswith('''.zip''' ) or from_gh)]
for p in paths:
selected_warnings.update(extract_warnings_from_single_artifact(lowerCAmelCase__ , lowerCAmelCase__ ) )
return selected_warnings
if __name__ == "__main__":
def a__ ( lowerCAmelCase__ ) -> Any:
return values.split(''',''' )
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
# optional parameters
parser.add_argument(
'''--targets''',
default='''DeprecationWarning,UserWarning,FutureWarning''',
type=list_str,
help='''Comma-separated list of target warning(s) which we want to extract.''',
)
parser.add_argument(
'''--from_gh''',
action='''store_true''',
help='''If running from a GitHub action workflow and collecting warnings from its artifacts.''',
)
UpperCamelCase__ = parser.parse_args()
UpperCamelCase__ = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
UpperCamelCase__ = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('''=''' * 8_0)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
UpperCamelCase__ = extract_warnings(args.output_dir, args.targets)
UpperCamelCase__ = sorted(selected_warnings)
with open(os.path.join(args.output_dir, '''selected_warnings.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 75 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCamelCase_ ( __a ):
def __init__( self : Dict , _A : List[str] , _A : int ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=_A , scheduler=_A )
@torch.no_grad()
def __call__( self : List[Any] , _A : int = 1 , _A : int = 100 , _A : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _A : Optional[float] = None , _A : bool = True , ):
'''simple docstring'''
if audio_length_in_s is None:
UpperCAmelCase__ : List[str] = self.unet.config.sample_size / self.unet.config.sample_rate
UpperCAmelCase__ : Union[str, Any] = audio_length_in_s * self.unet.config.sample_rate
UpperCAmelCase__ : List[Any] = 2 ** len(self.unet.up_blocks )
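# Assumed rationale: each U-Net up/down block halves or doubles the length, so valid sample sizes are multiples of 2 ** len(up_blocks).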
if sample_size < 3 * down_scale_factor:
raise ValueError(
f"""{audio_length_in_s} is too small. Make sure it's bigger or equal to"""
f""" {3 * down_scale_factor / self.unet.config.sample_rate}.""" )
UpperCAmelCase__ : List[Any] = int(_A )
if sample_size % down_scale_factor != 0:
UpperCAmelCase__ : int = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"""
f""" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"""
''' process.''' )
UpperCAmelCase__ : Dict = int(_A )
UpperCAmelCase__ : Optional[Any] = next(iter(self.unet.parameters() ) ).dtype
UpperCAmelCase__ : int = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(_A , _A ) and len(_A ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(_A )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
UpperCAmelCase__ : Optional[int] = randn_tensor(_A , generator=_A , device=self.device , dtype=_A )
# set step values
self.scheduler.set_timesteps(_A , device=audio.device )
UpperCAmelCase__ : List[str] = self.scheduler.timesteps.to(_A )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
UpperCAmelCase__ : Optional[int] = self.unet(_A , _A ).sample
# 2. compute previous image: x_t -> t_t-1
UpperCAmelCase__ : List[Any] = self.scheduler.step(_A , _A , _A ).prev_sample
UpperCAmelCase__ : Any = audio.clamp(-1 , 1 ).float().cpu().numpy()
UpperCAmelCase__ : Any = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=_A )
| 75 | 1 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
UpperCAmelCase__ : List[Any] = get_activation('''gelu''' )
self.assertTrue(torch.allclose(gelu_python(_A ) , torch_builtin(_A ) ) )
self.assertFalse(torch.allclose(gelu_python(_A ) , gelu_new(_A ) ) )
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
UpperCAmelCase__ : Any = get_activation('''gelu''' )
UpperCAmelCase__ : List[Any] = get_activation('''gelu_10''' )
UpperCAmelCase__ : Tuple = torch_builtin(_A )
UpperCAmelCase__ : List[str] = geluaa(_A )
UpperCAmelCase__ : Any = torch.where(y_gelu_aa < 1_0.0 , 1 , 0 )
self.assertTrue(torch.max(_A ).item() == 1_0.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
get_activation('''gelu''' )
get_activation('''gelu_10''' )
get_activation('''gelu_fast''' )
get_activation('''gelu_new''' )
get_activation('''gelu_python''' )
get_activation('''gelu_pytorch_tanh''' )
get_activation('''linear''' )
get_activation('''mish''' )
get_activation('''quick_gelu''' )
get_activation('''relu''' )
get_activation('''sigmoid''' )
get_activation('''silu''' )
get_activation('''swish''' )
get_activation('''tanh''' )
with self.assertRaises(_A ):
get_activation('''bogus''' )
with self.assertRaises(_A ):
get_activation(_A )
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = get_activation('''gelu''' )
UpperCAmelCase__ : List[Any] = 1
UpperCAmelCase__ : Any = get_activation('''gelu''' )
self.assertEqual(acta.a , 1 )
with self.assertRaises(_A ):
UpperCAmelCase__ : str = acta.a
| 75 |
'''simple docstring'''
from math import factorial
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> float:
if successes > trials:
raise ValueError('''successes must be less than or equal to trials''' )
if trials < 0 or successes < 0:
raise ValueError('''the function is defined for non-negative integers''' )
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
raise ValueError('''the function is defined for non-negative integers''' )
if not 0 < prob < 1:
raise ValueError('''prob has to be in the range of 0 to 1''' )
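# Binomial PMF: P(X = k) = C(n, k) * p**k * (1 - p)**(n - k)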
UpperCAmelCase__ : Any = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
UpperCAmelCase__ : Any = float(factorial(lowerCAmelCase__ ) )
coefficient /= factorial(lowerCAmelCase__ ) * factorial(trials - successes )
return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print('''Probability of 2 successes out of 4 trails''')
print('''with probability of 0.75 is:''', end=''' ''')
print(binomial_distribution(2, 4, 0.75))
| 75 | 1 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def a__ ( ) -> List[str]:
raise RuntimeError('''CUDA out of memory.''' )
class lowerCamelCase_ ( nn.Module ):
def __init__( self : List[Any] ):
'''simple docstring'''
super().__init__()
UpperCAmelCase__ : List[str] = nn.Linear(3 , 4 )
UpperCAmelCase__ : Optional[Any] = nn.BatchNormad(4 )
UpperCAmelCase__ : int = nn.Linear(4 , 5 )
def lowercase_ ( self : Dict , _A : str ):
'''simple docstring'''
return self.lineara(self.batchnorm(self.lineara(_A ) ) )
class lowerCamelCase_ ( unittest.TestCase ):
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = []
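# find_executable_batch_size retries the wrapped function, halving batch_size after each out-of-memory failure (128 -> 64 -> ... -> 8 here).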
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(_A : Tuple ):
nonlocal batch_sizes
batch_sizes.append(_A )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(_A , [128, 64, 32, 16, 8] )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : str = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(_A : Union[str, Any] , _A : Dict ):
nonlocal batch_sizes
batch_sizes.append(_A )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = mock_training_loop_function('''hello''' )
self.assertListEqual(_A , [128, 64, 32, 16, 8] )
self.assertListEqual([bs, arga] , [8, '''hello'''] )
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(_A : Dict ):
pass
with self.assertRaises(_A ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def lowercase_ ( self : Dict ):
'''simple docstring'''
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(_A : Optional[Any] ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(_A ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def lowercase_ ( self : int ):
'''simple docstring'''
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(_A : Dict , _A : Dict , _A : List[str] ):
if batch_size != 8:
raise_fake_out_of_memory()
with self.assertRaises(_A ) as cm:
mock_training_loop_function(128 , '''hello''' , '''world''' )
self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] )
self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0] )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(_A : int ):
raise ValueError('''Oops, we had an error!''' )
with self.assertRaises(_A ) as cm:
mock_training_loop_function()
self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] )
@require_cuda
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : int = torch.cuda.memory_allocated()
UpperCAmelCase__ : Optional[Any] = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , _A )
UpperCAmelCase__ : Dict = release_memory(_A )
self.assertEqual(torch.cuda.memory_allocated() , _A )
| 75 |
'''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCamelCase__ = logging.get_logger(__name__)
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = ['pixel_values']
def __init__( self : List[str] , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : bool = True , _A : Dict[str, int] = None , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , _A : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **_A : int , ):
'''simple docstring'''
super().__init__(**_A )
UpperCAmelCase__ : Dict = size if size is not None else {'''shortest_edge''': 224}
UpperCAmelCase__ : int = get_size_dict(_A , default_to_square=_A )
UpperCAmelCase__ : Union[str, Any] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
UpperCAmelCase__ : List[str] = get_size_dict(_A , param_name='''crop_size''' )
UpperCAmelCase__ : str = do_resize
UpperCAmelCase__ : List[Any] = size
UpperCAmelCase__ : int = resample
UpperCAmelCase__ : int = do_center_crop
UpperCAmelCase__ : List[str] = crop_size
UpperCAmelCase__ : Union[str, Any] = do_rescale
UpperCAmelCase__ : Optional[int] = rescale_factor
UpperCAmelCase__ : List[Any] = do_normalize
UpperCAmelCase__ : Dict = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
UpperCAmelCase__ : Dict = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def lowercase_ ( self : str , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = get_size_dict(_A , default_to_square=_A )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
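# Scale the shortest edge by 256/224 before resizing, a common ImageNet evaluation convention so a later 224x224 center crop keeps the full object.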
UpperCAmelCase__ : Tuple = int((256 / 224) * size['''shortest_edge'''] )
UpperCAmelCase__ : Tuple = get_resize_output_image_size(_A , size=_A , default_to_square=_A )
UpperCAmelCase__ : Dict = {'''height''': output_size[0], '''width''': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" )
return resize(
_A , size=(size_dict['''height'''], size_dict['''width''']) , resample=_A , data_format=_A , **_A )
def lowercase_ ( self : Optional[Any] , _A : np.ndarray , _A : Dict[str, int] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Optional[int] , ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(_A , size=(size['''height'''], size['''width''']) , data_format=_A , **_A )
def lowercase_ ( self : List[str] , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Dict , ):
'''simple docstring'''
return rescale(_A , scale=_A , data_format=_A , **_A )
def lowercase_ ( self : Dict , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Optional[int] , ):
'''simple docstring'''
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def lowercase_ ( self : Optional[Any] , _A : ImageInput , _A : Optional[bool] = None , _A : Optional[Dict[str, int]] = None , _A : PILImageResampling = None , _A : Optional[bool] = None , _A : Optional[Dict[str, int]] = None , _A : Optional[bool] = None , _A : Optional[float] = None , _A : Optional[bool] = None , _A : Optional[Union[float, Iterable[float]]] = None , _A : Optional[Union[float, Iterable[float]]] = None , _A : Optional[TensorType] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : str , ):
'''simple docstring'''
UpperCAmelCase__ : str = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase__ : Optional[int] = resample if resample is not None else self.resample
UpperCAmelCase__ : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase__ : Tuple = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase__ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase__ : List[str] = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase__ : Tuple = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase__ : List[str] = image_std if image_std is not None else self.image_std
UpperCAmelCase__ : Tuple = size if size is not None else self.size
UpperCAmelCase__ : int = get_size_dict(_A , default_to_square=_A )
UpperCAmelCase__ : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase__ : int = get_size_dict(_A , param_name='''crop_size''' )
UpperCAmelCase__ : Union[str, Any] = make_list_of_images(_A )
if not valid_images(_A ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
UpperCAmelCase__ : int = [to_numpy_array(_A ) for image in images]
if do_resize:
UpperCAmelCase__ : str = [self.resize(_A , _A , _A ) for image in images]
if do_center_crop:
UpperCAmelCase__ : Tuple = [self.center_crop(_A , _A ) for image in images]
if do_rescale:
UpperCAmelCase__ : Optional[int] = [self.rescale(_A , _A ) for image in images]
if do_normalize:
UpperCAmelCase__ : Any = [self.normalize(_A , _A , _A ) for image in images]
UpperCAmelCase__ : Tuple = [to_channel_dimension_format(_A , _A ) for image in images]
UpperCAmelCase__ : Dict = {'''pixel_values''': images}
return BatchFeature(data=_A , tensor_type=_A )
| 75 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {'''openai-gpt''': '''https://huggingface.co/openai-gpt/resolve/main/config.json'''}
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = 'openai-gpt'
lowerCAmelCase__ = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : int , _A : Optional[Any]=40_478 , _A : List[str]=512 , _A : Dict=768 , _A : List[str]=12 , _A : Dict=12 , _A : Optional[Any]="gelu" , _A : List[str]=0.1 , _A : List[str]=0.1 , _A : Optional[int]=0.1 , _A : Tuple=1e-5 , _A : Union[str, Any]=0.0_2 , _A : str="cls_index" , _A : List[str]=True , _A : Optional[Any]=None , _A : Tuple=True , _A : Dict=0.1 , **_A : List[str] , ):
'''simple docstring'''
UpperCAmelCase__ : str = vocab_size
UpperCAmelCase__ : int = n_positions
UpperCAmelCase__ : List[str] = n_embd
UpperCAmelCase__ : List[Any] = n_layer
UpperCAmelCase__ : str = n_head
UpperCAmelCase__ : Union[str, Any] = afn
UpperCAmelCase__ : Dict = resid_pdrop
UpperCAmelCase__ : Union[str, Any] = embd_pdrop
UpperCAmelCase__ : Tuple = attn_pdrop
UpperCAmelCase__ : List[Any] = layer_norm_epsilon
UpperCAmelCase__ : Optional[int] = initializer_range
UpperCAmelCase__ : str = summary_type
UpperCAmelCase__ : Union[str, Any] = summary_use_proj
UpperCAmelCase__ : List[str] = summary_activation
UpperCAmelCase__ : str = summary_first_dropout
UpperCAmelCase__ : int = summary_proj_to_labels
super().__init__(**_A )
| 75 |
'''simple docstring'''
import math
def a__ ( ) -> None:
UpperCAmelCase__ : List[str] = input('''Enter message: ''' )
UpperCAmelCase__ : Any = int(input(F"""Enter key [2-{len(lowerCAmelCase__ ) - 1}]: """ ) )
UpperCAmelCase__ : List[str] = input('''Encryption/Decryption [e/d]: ''' )
if mode.lower().startswith('''e''' ):
UpperCAmelCase__ : Dict = encrypt_message(lowerCAmelCase__ , lowerCAmelCase__ )
elif mode.lower().startswith('''d''' ):
UpperCAmelCase__ : Optional[int] = decrypt_message(lowerCAmelCase__ , lowerCAmelCase__ )
# Append pipe symbol (vertical bar) to identify spaces at the end.
print(F"""Output:\n{text + "|"}""" )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
UpperCAmelCase__ : Optional[int] = [''''''] * key
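# Columnar transposition: column col collects message[col], message[col + key], message[col + 2 * key], ...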
for col in range(lowerCAmelCase__ ):
UpperCAmelCase__ : Tuple = col
while pointer < len(lowerCAmelCase__ ):
cipher_text[col] += message[pointer]
pointer += key
return "".join(lowerCAmelCase__ )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
UpperCAmelCase__ : int = math.ceil(len(lowerCAmelCase__ ) / key )
UpperCAmelCase__ : Any = key
UpperCAmelCase__ : Optional[int] = (num_cols * num_rows) - len(lowerCAmelCase__ )
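# "Shaded boxes" are the grid cells left empty when the message does not fill the last column.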
UpperCAmelCase__ : List[Any] = [''''''] * num_cols
UpperCAmelCase__ : List[str] = 0
UpperCAmelCase__ : List[Any] = 0
for symbol in message:
plain_text[col] += symbol
col += 1
if (
(col == num_cols)
or (col == num_cols - 1)
and (row >= num_rows - num_shaded_boxes)
):
UpperCAmelCase__ : Optional[int] = 0
row += 1
return "".join(lowerCAmelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 75 | 1 |
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
UpperCamelCase__ = '''CompVis/stable-diffusion-v1-1'''
UpperCamelCase__ = '''CompVis/stable-diffusion-v1-2'''
UpperCamelCase__ = '''CompVis/stable-diffusion-v1-3'''
UpperCamelCase__ = '''CompVis/stable-diffusion-v1-4'''
class lowerCamelCase_ ( __a ):
def __init__( self : Dict , _A : AutoencoderKL , _A : CLIPTextModel , _A : CLIPTokenizer , _A : UNetaDConditionModel , _A : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , _A : StableDiffusionSafetyChecker , _A : CLIPImageProcessor , _A : bool = True , ):
'''simple docstring'''
super().__init__()
UpperCAmelCase__ : List[str] = StableDiffusionPipeline.from_pretrained(_A )
UpperCAmelCase__ : List[Any] = StableDiffusionPipeline.from_pretrained(_A )
UpperCAmelCase__ : str = StableDiffusionPipeline.from_pretrained(_A )
UpperCAmelCase__ : str = StableDiffusionPipeline(
vae=_A , text_encoder=_A , tokenizer=_A , unet=_A , scheduler=_A , safety_checker=_A , feature_extractor=_A , requires_safety_checker=_A , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
return {k: getattr(self , _A ) for k in self.config.keys() if not k.startswith('''_''' )}
def lowercase_ ( self : Union[str, Any] , _A : Optional[Union[str, int]] = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCAmelCase__ : Optional[int] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_A )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
self.enable_attention_slicing(_A )
@torch.no_grad()
def lowercase_ ( self : List[Any] , _A : Union[str, List[str]] , _A : int = 512 , _A : int = 512 , _A : int = 50 , _A : float = 7.5 , _A : Optional[Union[str, List[str]]] = None , _A : Optional[int] = 1 , _A : float = 0.0 , _A : Optional[torch.Generator] = None , _A : Optional[torch.FloatTensor] = None , _A : Optional[str] = "pil" , _A : bool = True , _A : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _A : int = 1 , **_A : Tuple , ):
'''simple docstring'''
return self.pipea(
prompt=_A , height=_A , width=_A , num_inference_steps=_A , guidance_scale=_A , negative_prompt=_A , num_images_per_prompt=_A , eta=_A , generator=_A , latents=_A , output_type=_A , return_dict=_A , callback=_A , callback_steps=_A , **_A , )
@torch.no_grad()
def lowercase_ ( self : int , _A : Union[str, List[str]] , _A : int = 512 , _A : int = 512 , _A : int = 50 , _A : float = 7.5 , _A : Optional[Union[str, List[str]]] = None , _A : Optional[int] = 1 , _A : float = 0.0 , _A : Optional[torch.Generator] = None , _A : Optional[torch.FloatTensor] = None , _A : Optional[str] = "pil" , _A : bool = True , _A : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _A : int = 1 , **_A : List[str] , ):
'''simple docstring'''
return self.pipea(
prompt=_A , height=_A , width=_A , num_inference_steps=_A , guidance_scale=_A , negative_prompt=_A , num_images_per_prompt=_A , eta=_A , generator=_A , latents=_A , output_type=_A , return_dict=_A , callback=_A , callback_steps=_A , **_A , )
@torch.no_grad()
def lowercase_ ( self : Optional[int] , _A : Union[str, List[str]] , _A : int = 512 , _A : int = 512 , _A : int = 50 , _A : float = 7.5 , _A : Optional[Union[str, List[str]]] = None , _A : Optional[int] = 1 , _A : float = 0.0 , _A : Optional[torch.Generator] = None , _A : Optional[torch.FloatTensor] = None , _A : Optional[str] = "pil" , _A : bool = True , _A : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _A : int = 1 , **_A : Any , ):
'''simple docstring'''
return self.pipea(
prompt=_A , height=_A , width=_A , num_inference_steps=_A , guidance_scale=_A , negative_prompt=_A , num_images_per_prompt=_A , eta=_A , generator=_A , latents=_A , output_type=_A , return_dict=_A , callback=_A , callback_steps=_A , **_A , )
@torch.no_grad()
def lowercase_ ( self : Any , _A : Union[str, List[str]] , _A : int = 512 , _A : int = 512 , _A : int = 50 , _A : float = 7.5 , _A : Optional[Union[str, List[str]]] = None , _A : Optional[int] = 1 , _A : float = 0.0 , _A : Optional[torch.Generator] = None , _A : Optional[torch.FloatTensor] = None , _A : Optional[str] = "pil" , _A : bool = True , _A : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _A : int = 1 , **_A : Union[str, Any] , ):
'''simple docstring'''
return self.pipea(
prompt=_A , height=_A , width=_A , num_inference_steps=_A , guidance_scale=_A , negative_prompt=_A , num_images_per_prompt=_A , eta=_A , generator=_A , latents=_A , output_type=_A , return_dict=_A , callback=_A , callback_steps=_A , **_A , )
@torch.no_grad()
def lowercase_ ( self : str , _A : Union[str, List[str]] , _A : int = 512 , _A : int = 512 , _A : int = 50 , _A : float = 7.5 , _A : Optional[Union[str, List[str]]] = None , _A : Optional[int] = 1 , _A : float = 0.0 , _A : Optional[torch.Generator] = None , _A : Optional[torch.FloatTensor] = None , _A : Optional[str] = "pil" , _A : bool = True , _A : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _A : int = 1 , **_A : Optional[Any] , ):
'''simple docstring'''
UpperCAmelCase__ : Any = '''cuda''' if torch.cuda.is_available() else '''cpu'''
self.to(_A )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
# Get first result from Stable Diffusion Checkpoint v1.1
UpperCAmelCase__ : List[Any] = self.textaimg_sda_a(
prompt=_A , height=_A , width=_A , num_inference_steps=_A , guidance_scale=_A , negative_prompt=_A , num_images_per_prompt=_A , eta=_A , generator=_A , latents=_A , output_type=_A , return_dict=_A , callback=_A , callback_steps=_A , **_A , )
# Get first result from Stable Diffusion Checkpoint v1.2
UpperCAmelCase__ : List[Any] = self.textaimg_sda_a(
prompt=_A , height=_A , width=_A , num_inference_steps=_A , guidance_scale=_A , negative_prompt=_A , num_images_per_prompt=_A , eta=_A , generator=_A , latents=_A , output_type=_A , return_dict=_A , callback=_A , callback_steps=_A , **_A , )
# Get first result from Stable Diffusion Checkpoint v1.3
UpperCAmelCase__ : List[Any] = self.textaimg_sda_a(
prompt=_A , height=_A , width=_A , num_inference_steps=_A , guidance_scale=_A , negative_prompt=_A , num_images_per_prompt=_A , eta=_A , generator=_A , latents=_A , output_type=_A , return_dict=_A , callback=_A , callback_steps=_A , **_A , )
# Get first result from Stable Diffusion Checkpoint v1.4
UpperCAmelCase__ : List[str] = self.textaimg_sda_a(
prompt=_A , height=_A , width=_A , num_inference_steps=_A , guidance_scale=_A , negative_prompt=_A , num_images_per_prompt=_A , eta=_A , generator=_A , latents=_A , output_type=_A , return_dict=_A , callback=_A , callback_steps=_A , **_A , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
| 75 |
'''simple docstring'''
class lowerCamelCase_ :
def __init__( self : str , _A : Union[str, Any] , _A : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = name
UpperCAmelCase__ : Union[str, Any] = val
def __str__( self : Tuple ):
'''simple docstring'''
return f"""{self.__class__.__name__}({self.name}, {self.val})"""
def __lt__( self : Union[str, Any] , _A : Dict ):
'''simple docstring'''
return self.val < other.val
class lowerCamelCase_ :
def __init__( self : int , _A : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = {}
UpperCAmelCase__ : int = {}
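# The two dicts track heap_dict (node key -> value) and idx_of_element (node -> position in the heap list).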
UpperCAmelCase__ : Any = self.build_heap(_A )
def __getitem__( self : Any , _A : Any ):
'''simple docstring'''
return self.get_value(_A )
def lowercase_ ( self : Any , _A : List[Any] ):
'''simple docstring'''
return (idx - 1) // 2
def lowercase_ ( self : Union[str, Any] , _A : Optional[int] ):
'''simple docstring'''
return idx * 2 + 1
def lowercase_ ( self : Tuple , _A : List[Any] ):
'''simple docstring'''
return idx * 2 + 2
def lowercase_ ( self : List[str] , _A : Tuple ):
'''simple docstring'''
return self.heap_dict[key]
def lowercase_ ( self : str , _A : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Any = len(_A ) - 1
UpperCAmelCase__ : Tuple = self.get_parent_idx(_A )
for idx, i in enumerate(_A ):
UpperCAmelCase__ : Dict = idx
UpperCAmelCase__ : Optional[Any] = i.val
for i in range(_A , -1 , -1 ):
self.sift_down(_A , _A )
return array
def lowercase_ ( self : Optional[Any] , _A : str , _A : List[Any] ):
'''simple docstring'''
while True:
UpperCAmelCase__ : Any = self.get_left_child_idx(_A ) # noqa: E741
UpperCAmelCase__ : Optional[Any] = self.get_right_child_idx(_A )
UpperCAmelCase__ : Tuple = idx
if l < len(_A ) and array[l] < array[idx]:
UpperCAmelCase__ : int = l
if r < len(_A ) and array[r] < array[smallest]:
UpperCAmelCase__ : Dict = r
if smallest != idx:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = array[smallest], array[idx]
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = (
self.idx_of_element[array[smallest]],
self.idx_of_element[array[idx]],
)
UpperCAmelCase__ : str = smallest
else:
break
def lowercase_ ( self : List[str] , _A : int ):
'''simple docstring'''
UpperCAmelCase__ : str = self.get_parent_idx(_A )
while p >= 0 and self.heap[p] > self.heap[idx]:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.heap[idx], self.heap[p]
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = (
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
UpperCAmelCase__ : Union[str, Any] = p
UpperCAmelCase__ : List[Any] = self.get_parent_idx(_A )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
return self.heap[0]
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Any = self.heap[-1], self.heap[0]
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = (
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
UpperCAmelCase__ : int = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0 , self.heap )
return x
def lowercase_ ( self : int , _A : Union[str, Any] ):
'''simple docstring'''
self.heap.append(_A )
UpperCAmelCase__ : Union[str, Any] = len(self.heap ) - 1
UpperCAmelCase__ : Optional[Any] = node.val
self.sift_up(len(self.heap ) - 1 )
def lowercase_ ( self : str ):
'''simple docstring'''
return len(self.heap ) == 0
def lowercase_ ( self : int , _A : Optional[Any] , _A : str ):
'''simple docstring'''
assert (
self.heap[self.idx_of_element[node]].val > new_value
), "new_value must be less than the current value"
UpperCAmelCase__ : Optional[Any] = new_value
UpperCAmelCase__ : List[str] = new_value
self.sift_up(self.idx_of_element[node] )
UpperCamelCase__ = Node('''R''', -1)
UpperCamelCase__ = Node('''B''', 6)
UpperCamelCase__ = Node('''A''', 3)
UpperCamelCase__ = Node('''X''', 1)
UpperCamelCase__ = Node('''E''', 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
UpperCamelCase__ = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('''Min Heap - before decrease key''')
for i in my_min_heap.heap:
print(i)
print('''Min Heap - After decrease key of node [B -> -17]''')
my_min_heap.decrease_key(b, -1_7)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 75 | 1 |
'''simple docstring'''
from typing import Any
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ) -> list:
_validation(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , )
# Creates data structures and fill initial step
UpperCAmelCase__ : dict = {}
UpperCAmelCase__ : dict = {}
for state in states_space:
UpperCAmelCase__ : str = observations_space[0]
UpperCAmelCase__ : List[Any] = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
UpperCAmelCase__ : Dict = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(lowerCAmelCase__ ) ):
UpperCAmelCase__ : List[str] = observations_space[o]
UpperCAmelCase__ : Optional[int] = observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
UpperCAmelCase__ : Optional[int] = ''''''
UpperCAmelCase__ : Tuple = -1
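# Viterbi recurrence: probability of reaching this state via k_state is V[k_state, o - 1] * transition[k_state][state] * emission[state][observation]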
for k_state in states_space:
UpperCAmelCase__ : Any = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
UpperCAmelCase__ : Optional[int] = probability
UpperCAmelCase__ : Any = k_state
# Update probabilities and pointers dicts
UpperCAmelCase__ : Union[str, Any] = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
UpperCAmelCase__ : Optional[Any] = arg_max
# The final observation
UpperCAmelCase__ : str = observations_space[len(lowerCAmelCase__ ) - 1]
# argmax for given final observation
UpperCAmelCase__ : int = ''''''
UpperCAmelCase__ : Optional[Any] = -1
for k_state in states_space:
UpperCAmelCase__ : Optional[int] = probabilities[(k_state, final_observation)]
if probability > max_probability:
UpperCAmelCase__ : Optional[Any] = probability
UpperCAmelCase__ : List[Any] = k_state
UpperCAmelCase__ : Union[str, Any] = arg_max
# Process pointers backwards
UpperCAmelCase__ : Union[str, Any] = last_state
UpperCAmelCase__ : int = []
for o in range(len(lowerCAmelCase__ ) - 1 , -1 , -1 ):
result.append(lowerCAmelCase__ )
UpperCAmelCase__ : Optional[Any] = pointers[previous, observations_space[o]]
result.reverse()
return result
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ) -> None:
_validate_not_empty(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , )
_validate_lists(lowerCAmelCase__ , lowerCAmelCase__ )
_validate_dicts(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ) -> None:
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError('''There\'s an empty parameter''' )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> None:
_validate_list(lowerCAmelCase__ , '''observations_space''' )
_validate_list(lowerCAmelCase__ , '''states_space''' )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> None:
if not isinstance(_object , lowerCAmelCase__ ):
UpperCAmelCase__ : int = F"""{var_name} must be a list"""
raise ValueError(lowerCAmelCase__ )
else:
for x in _object:
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase__ : Tuple = F"""{var_name} must be a list of strings"""
raise ValueError(lowerCAmelCase__ )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ) -> None:
_validate_dict(lowerCAmelCase__ , '''initial_probabilities''' , lowerCAmelCase__ )
_validate_nested_dict(lowerCAmelCase__ , '''transition_probabilities''' )
_validate_nested_dict(lowerCAmelCase__ , '''emission_probabilities''' )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> None:
_validate_dict(_object , lowerCAmelCase__ , lowerCAmelCase__ )
for x in _object.values():
_validate_dict(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = False ) -> None:
if not isinstance(_object , lowerCAmelCase__ ):
UpperCAmelCase__ : Union[str, Any] = F"""{var_name} must be a dict"""
raise ValueError(lowerCAmelCase__ )
if not all(isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) for x in _object ):
UpperCAmelCase__ : int = F"""{var_name} all keys must be strings"""
raise ValueError(lowerCAmelCase__ )
if not all(isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) for x in _object.values() ):
UpperCAmelCase__ : str = '''nested dictionary ''' if nested else ''''''
UpperCAmelCase__ : Dict = F"""{var_name} {nested_text}all values must be {value_type.__name__}"""
raise ValueError(lowerCAmelCase__ )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 75 |
'''simple docstring'''
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
UpperCamelCase__ = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
UpperCamelCase__ = direct_transformers_import(PATH_TO_TRANSFORMERS)
UpperCamelCase__ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
UpperCamelCase__ = re.compile(R'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''')
UpperCamelCase__ = {
'''DecisionTransformerConfig''',
'''EncoderDecoderConfig''',
'''MusicgenConfig''',
'''RagConfig''',
'''SpeechEncoderDecoderConfig''',
'''TimmBackboneConfig''',
'''VisionEncoderDecoderConfig''',
'''VisionTextDualEncoderConfig''',
'''LlamaConfig''',
}
def a__ ( lowerCAmelCase__ ) -> List[str]:
UpperCAmelCase__ : str = None
# source code of `config_class`
UpperCAmelCase__ : str = inspect.getsource(lowerCAmelCase__ )
UpperCAmelCase__ : List[Any] = _re_checkpoint.findall(lowerCAmelCase__ )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith('''/''' ):
UpperCAmelCase__ : List[str] = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
UpperCAmelCase__ : Union[str, Any] = F"""https://huggingface.co/{ckpt_name}"""
if ckpt_link == ckpt_link_from_name:
UpperCAmelCase__ : Any = ckpt_name
break
return checkpoint
def a__ ( ) -> Dict:
UpperCAmelCase__ : Optional[Any] = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
UpperCAmelCase__ : Any = get_checkpoint_from_config_class(lowerCAmelCase__ )
UpperCAmelCase__ : Optional[int] = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > 0:
UpperCAmelCase__ : List[str] = '''\n'''.join(sorted(lowerCAmelCase__ ) )
raise ValueError(F"""The following configurations don't contain any valid checkpoint:\n{message}""" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 75 | 1 |
'''simple docstring'''
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
UpperCamelCase__ = Mapping[str, np.ndarray]
UpperCamelCase__ = Mapping[str, Any] # Is a nested dict.
UpperCamelCase__ = 0.01
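# PICO_TO_ANGSTROM: the parsed tertiary coordinates are in picometers; 1 pm = 0.01 angstrom.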
@dataclasses.dataclass(frozen=__a )
class lowerCamelCase_ :
lowerCAmelCase__ = 42 # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
lowerCAmelCase__ = 42 # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
lowerCAmelCase__ = 42 # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
lowerCAmelCase__ = 42 # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
lowerCAmelCase__ = 42 # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
lowerCAmelCase__ = None
# Optional remark about the protein. Included as a comment in output PDB
# files
lowerCAmelCase__ = None
# Templates used to generate this protein (prediction-only)
lowerCAmelCase__ = None
# Chain corresponding to each parent
lowerCAmelCase__ = None
def a__ ( lowerCAmelCase__ ) -> Protein:
UpperCAmelCase__ : str = R'''(\[[A-Z]+\]\n)'''
UpperCAmelCase__ : List[str] = [tag.strip() for tag in re.split(lowerCAmelCase__ , lowerCAmelCase__ ) if len(lowerCAmelCase__ ) > 0]
UpperCAmelCase__ : Iterator[Tuple[str, List[str]]] = zip(tags[0::2] , [l.split('''\n''' ) for l in tags[1::2]] )
UpperCAmelCase__ : List[str] = ["N", "CA", "C"]
UpperCAmelCase__ : Dict = None
UpperCAmelCase__ : List[str] = None
UpperCAmelCase__ : List[Any] = None
for g in groups:
if "[PRIMARY]" == g[0]:
UpperCAmelCase__ : Dict = g[1][0].strip()
for i in range(len(lowerCAmelCase__ ) ):
if seq[i] not in residue_constants.restypes:
UpperCAmelCase__ : List[str] = '''X''' # FIXME: strings are immutable
UpperCAmelCase__ : Tuple = np.array(
[residue_constants.restype_order.get(lowerCAmelCase__ , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
UpperCAmelCase__ : List[List[float]] = []
for axis in range(3 ):
tertiary.append(list(map(lowerCAmelCase__ , g[1][axis].split() ) ) )
UpperCAmelCase__ : List[str] = np.array(lowerCAmelCase__ )
UpperCAmelCase__ : Union[str, Any] = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
for i, atom in enumerate(lowerCAmelCase__ ):
UpperCAmelCase__ : Dict = np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
UpperCAmelCase__ : Optional[Any] = np.array(list(map({'''-''': 0, '''+''': 1}.get , g[1][0].strip() ) ) )
UpperCAmelCase__ : List[Any] = np.zeros(
(
len(lowerCAmelCase__ ),
residue_constants.atom_type_num,
) ).astype(np.floataa )
for i, atom in enumerate(lowerCAmelCase__ ):
UpperCAmelCase__ : str = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=lowerCAmelCase__ , atom_mask=lowerCAmelCase__ , aatype=lowerCAmelCase__ , residue_index=np.arange(len(lowerCAmelCase__ ) ) , b_factors=lowerCAmelCase__ , )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ = 0 ) -> List[str]:
UpperCAmelCase__ : List[str] = []
UpperCAmelCase__ : Optional[Any] = prot.remark
if remark is not None:
pdb_headers.append(F"""REMARK {remark}""" )
UpperCAmelCase__ : Optional[Any] = prot.parents
UpperCAmelCase__ : Optional[Any] = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
UpperCAmelCase__ : int = [p for i, p in zip(lowerCAmelCase__ , lowerCAmelCase__ ) if i == chain_id]
if parents is None or len(lowerCAmelCase__ ) == 0:
UpperCAmelCase__ : List[Any] = ['''N/A''']
pdb_headers.append(F"""PARENT {" ".join(lowerCAmelCase__ )}""" )
return pdb_headers
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
UpperCAmelCase__ : List[str] = []
UpperCAmelCase__ : Optional[int] = pdb_str.split('''\n''' )
UpperCAmelCase__ : int = prot.remark
if remark is not None:
out_pdb_lines.append(F"""REMARK {remark}""" )
UpperCAmelCase__ : List[List[str]]
if prot.parents is not None and len(prot.parents ) > 0:
UpperCAmelCase__ : List[str] = []
if prot.parents_chain_index is not None:
UpperCAmelCase__ : Dict[str, List[str]] = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(lowerCAmelCase__ ) , [] )
parent_dict[str(lowerCAmelCase__ )].append(lowerCAmelCase__ )
UpperCAmelCase__ : List[Any] = max([int(lowerCAmelCase__ ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
UpperCAmelCase__ : Dict = parent_dict.get(str(lowerCAmelCase__ ) , ['''N/A'''] )
parents_per_chain.append(lowerCAmelCase__ )
else:
parents_per_chain.append(list(prot.parents ) )
else:
UpperCAmelCase__ : Dict = [['''N/A''']]
def make_parent_line(lowerCAmelCase__ ) -> str:
return F"""PARENT {" ".join(lowerCAmelCase__ )}"""
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
UpperCAmelCase__ : Optional[int] = 0
for i, l in enumerate(lowerCAmelCase__ ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(lowerCAmelCase__ )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(lowerCAmelCase__ ):
UpperCAmelCase__ : Tuple = parents_per_chain[chain_counter]
else:
UpperCAmelCase__ : Dict = ['''N/A''']
out_pdb_lines.append(make_parent_line(lowerCAmelCase__ ) )
return "\n".join(lowerCAmelCase__ )
def a__ ( lowerCAmelCase__ ) -> str:
UpperCAmelCase__ : Dict = residue_constants.restypes + ['''X''']
def res_atoa(lowerCAmelCase__ ) -> str:
return residue_constants.restype_atoa.get(restypes[r] , '''UNK''' )
UpperCAmelCase__ : List[str] = residue_constants.atom_types
UpperCAmelCase__ : List[str] = []
UpperCAmelCase__ : Union[str, Any] = prot.atom_mask
UpperCAmelCase__ : str = prot.aatype
UpperCAmelCase__ : int = prot.atom_positions
UpperCAmelCase__ : Any = prot.residue_index.astype(np.intaa )
UpperCAmelCase__ : Optional[int] = prot.b_factors
UpperCAmelCase__ : Dict = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError('''Invalid aatypes.''' )
UpperCAmelCase__ : Any = get_pdb_headers(lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > 0:
pdb_lines.extend(lowerCAmelCase__ )
UpperCAmelCase__ : int = aatype.shape[0]
UpperCAmelCase__ : Optional[int] = 1
UpperCAmelCase__ : Dict = 0
UpperCAmelCase__ : int = string.ascii_uppercase
UpperCAmelCase__ : str = None
# Add all atom sites.
for i in range(lowerCAmelCase__ ):
UpperCAmelCase__ : Optional[int] = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(lowerCAmelCase__ , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
UpperCAmelCase__ : Dict = '''ATOM'''
UpperCAmelCase__ : List[Any] = atom_name if len(lowerCAmelCase__ ) == 4 else F""" {atom_name}"""
UpperCAmelCase__ : Tuple = ''''''
UpperCAmelCase__ : Dict = ''''''
UpperCAmelCase__ : Optional[Any] = 1.0_0
UpperCAmelCase__ : Dict = atom_name[0] # Protein supports only C, N, O, S, this works.
UpperCAmelCase__ : int = ''''''
UpperCAmelCase__ : Any = '''A'''
if chain_index is not None:
UpperCAmelCase__ : Any = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
UpperCAmelCase__ : Optional[Any] = (
F"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"""
F"""{res_name_a:>3} {chain_tag:>1}"""
F"""{residue_index[i]:>4}{insertion_code:>1} """
F"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"""
F"""{occupancy:>6.2f}{b_factor:>6.2f} """
F"""{element:>2}{charge:>2}"""
)
pdb_lines.append(lowerCAmelCase__ )
atom_index += 1
UpperCAmelCase__ : int = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
UpperCAmelCase__ : List[Any] = True
UpperCAmelCase__ : int = chain_index[i + 1]
if should_terminate:
# Close the chain.
UpperCAmelCase__ : str = '''TER'''
UpperCAmelCase__ : List[Any] = (
F"""{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}"""
)
pdb_lines.append(lowerCAmelCase__ )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(lowerCAmelCase__ , lowerCAmelCase__ ) )
pdb_lines.append('''END''' )
pdb_lines.append('''''' )
return "\n".join(lowerCAmelCase__ )
def a__ ( lowerCAmelCase__ ) -> np.ndarray:
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , ) -> Protein:
return Protein(
aatype=features['''aatype'''] , atom_positions=result['''final_atom_positions'''] , atom_mask=result['''final_atom_mask'''] , residue_index=features['''residue_index'''] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result['''final_atom_mask'''] ) , chain_index=lowerCAmelCase__ , remark=lowerCAmelCase__ , parents=lowerCAmelCase__ , parents_chain_index=lowerCAmelCase__ , )
| 75 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCamelCase_ ( metaclass=__a ):
lowerCAmelCase__ = ['torch', 'torchsde']
def __init__( self : Tuple , *_A : Any , **_A : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ['''torch''', '''torchsde'''] )
@classmethod
def lowercase_ ( cls : List[Any] , *_A : Tuple , **_A : Tuple ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''torchsde'''] )
@classmethod
def lowercase_ ( cls : List[str] , *_A : Optional[int] , **_A : Any ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''torchsde'''] )
| 75 | 1 |
'''simple docstring'''
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='''%(message)s''')
def a__ ( lowerCAmelCase__ ) -> np.ndarray:
return input_array.reshape((input_array.size, 1) )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> np.ndarray:
UpperCAmelCase__ : str = np.nan
for i in range(lowerCAmelCase__ ):
UpperCAmelCase__ : Tuple = features[:, labels == i]
UpperCAmelCase__ : List[Any] = data.mean(1 )
# Centralize the data of class i
UpperCAmelCase__ : List[str] = data - column_reshape(lowerCAmelCase__ )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(lowerCAmelCase__ , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
UpperCAmelCase__ : Tuple = np.dot(lowerCAmelCase__ , centered_data.T )
return covariance_sum / features.shape[1]
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> np.ndarray:
UpperCAmelCase__ : int = features.mean(1 )
UpperCAmelCase__ : List[Any] = np.nan
for i in range(lowerCAmelCase__ ):
UpperCAmelCase__ : Optional[int] = features[:, labels == i]
UpperCAmelCase__ : List[Any] = data.shape[1]
UpperCAmelCase__ : int = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(lowerCAmelCase__ ) - column_reshape(lowerCAmelCase__ ) , (column_reshape(lowerCAmelCase__ ) - column_reshape(lowerCAmelCase__ )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
UpperCAmelCase__ : Union[str, Any] = device_data * np.dot(
column_reshape(lowerCAmelCase__ ) - column_reshape(lowerCAmelCase__ ) , (column_reshape(lowerCAmelCase__ ) - column_reshape(lowerCAmelCase__ )).T , )
return covariance_sum / features.shape[1]
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> np.ndarray:
# Check if the features have been loaded
if features.any():
UpperCAmelCase__ : List[str] = features.mean(1 )
# Center the dataset
UpperCAmelCase__ : Union[str, Any] = features - np.reshape(lowerCAmelCase__ , (data_mean.size, 1) )
UpperCAmelCase__ : List[str] = np.dot(lowerCAmelCase__ , centered_data.T ) / features.shape[1]
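# np.linalg.eigh returns eigenvalues in ascending order, so the highest-variance directions are the last columns.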
UpperCAmelCase__ , UpperCAmelCase__ : Dict = np.linalg.eigh(lowerCAmelCase__ )
# Take all the columns in the reverse order (-1), and then takes only the first
UpperCAmelCase__ : Optional[Any] = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
UpperCAmelCase__ : List[str] = np.dot(filtered_eigenvectors.T , lowerCAmelCase__ )
logging.info('''Principal Component Analysis computed''' )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format='''%(message)s''' , force=lowerCAmelCase__ )
logging.error('''Dataset empty''' )
raise AssertionError
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> np.ndarray:
assert classes > dimensions
# Check if features have been already loaded
if features.any:
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = eigh(
covariance_between_classes(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) , covariance_within_classes(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) , )
UpperCAmelCase__ : List[Any] = eigenvectors[:, ::-1][:, :dimensions]
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = np.linalg.svd(lowerCAmelCase__ )
UpperCAmelCase__ : Optional[Any] = svd_matrix[:, 0:dimensions]
UpperCAmelCase__ : List[Any] = np.dot(filtered_svd_matrix.T , lowerCAmelCase__ )
logging.info('''Linear Discriminant Analysis computed''' )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format='''%(message)s''' , force=lowerCAmelCase__ )
logging.error('''Dataset empty''' )
raise AssertionError
def a__ ( ) -> None:
# Create dummy dataset with 2 classes and 3 features
UpperCAmelCase__ : Optional[int] = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
UpperCAmelCase__ : List[str] = np.array([0, 0, 0, 1, 1] )
UpperCAmelCase__ : Optional[int] = 2
UpperCAmelCase__ : List[str] = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(lowerCAmelCase__ ) as error_info:
UpperCAmelCase__ : Optional[int] = linear_discriminant_analysis(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
if isinstance(lowerCAmelCase__ , np.ndarray ):
raise AssertionError(
'''Did not raise AssertionError for dimensions > classes''' )
assert error_info.type is AssertionError
def a__ ( ) -> None:
UpperCAmelCase__ : Union[str, Any] = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
UpperCAmelCase__ : str = 2
UpperCAmelCase__ : List[Any] = np.array([[6.9_2_8_2_0_3_2_3, 8.6_6_0_2_5_4_0_4, 1_0.3_9_2_3_0_4_8_5], [3.0, 3.0, 3.0]] )
with pytest.raises(lowerCAmelCase__ ) as error_info:
UpperCAmelCase__ : str = principal_component_analysis(lowerCAmelCase__ , lowerCAmelCase__ )
if not np.allclose(lowerCAmelCase__ , lowerCAmelCase__ ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| 75 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''}
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = 'ctrl'
lowerCAmelCase__ = ['past_key_values']
lowerCAmelCase__ = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : List[Any] , _A : Dict=246_534 , _A : Optional[Any]=256 , _A : Dict=1_280 , _A : List[str]=8_192 , _A : Tuple=48 , _A : Optional[Any]=16 , _A : List[Any]=0.1 , _A : List[Any]=0.1 , _A : List[str]=1e-6 , _A : Optional[int]=0.0_2 , _A : Tuple=True , **_A : Optional[Any] , ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = vocab_size
UpperCAmelCase__ : Any = n_positions
UpperCAmelCase__ : Optional[Any] = n_embd
UpperCAmelCase__ : List[str] = n_layer
UpperCAmelCase__ : Any = n_head
UpperCAmelCase__ : int = dff
UpperCAmelCase__ : str = resid_pdrop
UpperCAmelCase__ : Tuple = embd_pdrop
UpperCAmelCase__ : int = layer_norm_epsilon
UpperCAmelCase__ : Tuple = initializer_range
UpperCAmelCase__ : Union[str, Any] = use_cache
super().__init__(**_A )
| 75 | 1 |
'''simple docstring'''
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def a__ ( lowerCAmelCase__ ) -> Dict:
UpperCAmelCase__ : Optional[int] = [False] * len(lowerCAmelCase__ )
UpperCAmelCase__ : str = [-1] * len(lowerCAmelCase__ )
def dfs(lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase__ : Dict = True
UpperCAmelCase__ : List[str] = c
for u in graph[v]:
if not visited[u]:
dfs(lowerCAmelCase__ , 1 - c )
for i in range(len(lowerCAmelCase__ ) ):
if not visited[i]:
dfs(lowerCAmelCase__ , 0 )
for i in range(len(lowerCAmelCase__ ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
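# A minimal iterative sketch (not part of the original module; the name and
# signature below are illustrative) of the same two-coloring check using BFS,
# which avoids Python's recursion limit on deep graphs:
def check_bipartite_bfs_sketch(graph: dict[int, list[int]]) -> bool:
    from collections import deque

    color: dict[int, int] = {}
    for start in graph:
        if start in color:
            continue
        color[start] = 0
        queue = deque([start])
        while queue:
            v = queue.popleft()
            for u in graph[v]:
                if u not in color:
                    color[u] = 1 - color[v]  # give the neighbour the opposite color
                    queue.append(u)
                elif color[u] == color[v]:  # an edge within one set -> not bipartite
                    return False
    return True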
# Adjacency list of graph
UpperCamelCase__ = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
| 75 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because it should only be run when releasing a minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'pytorch',
'script': 'run_ddp.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'tensorflow',
'script': 'run_tf_dist.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
},
] )
class lowerCamelCase_ ( unittest.TestCase ):
def lowercase_ ( self : List[str] ):
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=_A , )
assert hasattr(self , '''env''' )
def lowercase_ ( self : List[Any] , _A : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = f"""{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}"""
# distributed data settings
UpperCAmelCase__ : int = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=_A , instance_count=_A , instance_type=self.instance_type , debugger_hook_config=_A , hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=_A , py_version='''py36''' , )
def lowercase_ ( self : Optional[int] , _A : Any ):
'''simple docstring'''
TrainingJobAnalytics(_A ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(2,)] )
def lowercase_ ( self : Optional[int] , _A : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.create_estimator(_A )
# run training
estimator.fit()
# result dataframe
UpperCAmelCase__ : Union[str, Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
UpperCAmelCase__ : Dict = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
UpperCAmelCase__ : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
        # get train time from the SageMaker job; this includes starting, preprocessing and stopping
UpperCAmelCase__ : Any = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 999_999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
        # dump test results into a json file to share in the PR
with open(f"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , _A )
| 75 | 1 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
)
| 75 |
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
UpperCamelCase__ = '''\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
'''
UpperCamelCase__ = '''\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
'''
UpperCamelCase__ = R'''
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting "1/2" to "\\frac{1}{2}")
Examples:
>>> metric = datasets.load_metric("competition_math")
>>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
>>> print(results)
{\'accuracy\': 1.0}
'''
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase_ ( datasets.Metric ):
def lowercase_ ( self : Dict ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ),
'''references''': datasets.Value('''string''' ),
} ) , homepage='''https://github.com/hendrycks/math''' , codebase_urls=['''https://github.com/hendrycks/math'''] , )
def lowercase_ ( self : Any , _A : str , _A : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = 0.0
for i, j in zip(_A , _A ):
n_correct += 1.0 if math_equivalence.is_equiv(_A , _A ) else 0.0
UpperCAmelCase__ : Dict = n_correct / len(_A )
return {
"accuracy": accuracy,
}
| 75 | 1 |
'''simple docstring'''
import math
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> float:
if initial_intensity < 0:
raise ValueError('''The value of intensity cannot be negative''' )
# handling of negative values of initial intensity
if angle < 0 or angle > 3_60:
raise ValueError('''In Malus Law, the angle is in the range 0-360 degrees''' )
# handling of values out of allowed range
return initial_intensity * (math.cos(math.radians(lowerCAmelCase__ ) ) ** 2)
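# Worked example (illustrative): an initial intensity of 100 through a polarizer
# at 60 degrees transmits 100 * math.cos(math.radians(60)) ** 2 = 25.0 (up to
# floating-point rounding).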
if __name__ == "__main__":
import doctest
doctest.testmod(name='''malus_law''')
| 75 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all BART models at https://huggingface.co/models?filter=bart
UpperCamelCase__ = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json''',
},
}
UpperCamelCase__ = {
'''facebook/bart-base''': 1_0_2_4,
'''facebook/bart-large''': 1_0_2_4,
'''facebook/bart-large-mnli''': 1_0_2_4,
'''facebook/bart-large-cnn''': 1_0_2_4,
'''facebook/bart-large-xsum''': 1_0_2_4,
'''yjernite/bart_eli5''': 1_0_2_4,
}
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ['input_ids', 'attention_mask']
lowerCAmelCase__ = BartTokenizer
def __init__( self : Tuple , _A : List[str]=None , _A : Optional[Any]=None , _A : Union[str, Any]=None , _A : Tuple="replace" , _A : Optional[Any]="<s>" , _A : int="</s>" , _A : Optional[Any]="</s>" , _A : List[str]="<s>" , _A : Optional[int]="<unk>" , _A : Optional[int]="<pad>" , _A : str="<mask>" , _A : Dict=False , _A : int=True , **_A : Optional[Any] , ):
'''simple docstring'''
super().__init__(
_A , _A , tokenizer_file=_A , errors=_A , bos_token=_A , eos_token=_A , sep_token=_A , cls_token=_A , unk_token=_A , pad_token=_A , mask_token=_A , add_prefix_space=_A , trim_offsets=_A , **_A , )
UpperCAmelCase__ : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , _A ) != add_prefix_space:
UpperCAmelCase__ : str = getattr(_A , pre_tok_state.pop('''type''' ) )
UpperCAmelCase__ : Any = add_prefix_space
UpperCAmelCase__ : str = pre_tok_class(**_A )
UpperCAmelCase__ : Dict = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
UpperCAmelCase__ : Optional[Any] = '''post_processor'''
UpperCAmelCase__ : List[Any] = getattr(self.backend_tokenizer , _A , _A )
if tokenizer_component_instance:
UpperCAmelCase__ : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
UpperCAmelCase__ : Union[str, Any] = tuple(state['''sep'''] )
if "cls" in state:
UpperCAmelCase__ : Union[str, Any] = tuple(state['''cls'''] )
UpperCAmelCase__ : Dict = False
if state.get('''add_prefix_space''' , _A ) != add_prefix_space:
UpperCAmelCase__ : Union[str, Any] = add_prefix_space
UpperCAmelCase__ : Dict = True
if state.get('''trim_offsets''' , _A ) != trim_offsets:
UpperCAmelCase__ : List[Any] = trim_offsets
UpperCAmelCase__ : List[Any] = True
if changes_to_apply:
UpperCAmelCase__ : Dict = getattr(_A , state.pop('''type''' ) )
UpperCAmelCase__ : Union[str, Any] = component_class(**_A )
setattr(self.backend_tokenizer , _A , _A )
@property
def lowercase_ ( self : Dict ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def lowercase_ ( self : Dict , _A : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else value
UpperCAmelCase__ : str = value
def lowercase_ ( self : Optional[int] , *_A : List[str] , **_A : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Any = kwargs.get('''is_split_into_words''' , _A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*_A , **_A )
def lowercase_ ( self : Optional[Any] , *_A : Union[str, Any] , **_A : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = kwargs.get('''is_split_into_words''' , _A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*_A , **_A )
def lowercase_ ( self : Optional[int] , _A : str , _A : Optional[str] = None ):
'''simple docstring'''
UpperCAmelCase__ : str = self._tokenizer.model.save(_A , name=_A )
return tuple(_A )
def lowercase_ ( self : Tuple , _A : Union[str, Any] , _A : Optional[int]=None ):
'''simple docstring'''
UpperCAmelCase__ : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
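    # e.g. a single sequence is wrapped as `<s> X </s>` and a pair of sequences
    # as `<s> A </s> </s> B </s>` (BART reuses the RoBERTa-style special tokens)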
def lowercase_ ( self : int , _A : List[int] , _A : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = [self.sep_token_id]
UpperCAmelCase__ : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 75 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> tuple:
UpperCAmelCase__ : Optional[Any] = namedtuple('''result''' , '''name value''' )
if (voltage, current, power).count(0 ) != 1:
raise ValueError('''Only one argument must be 0''' )
elif power < 0:
raise ValueError(
'''Power cannot be negative in any electrical/electronics system''' )
elif voltage == 0:
return result('''voltage''' , power / current )
elif current == 0:
return result('''current''' , power / voltage )
elif power == 0:
return result('''power''' , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError('''Exactly one argument must be 0''' )
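# Worked example (illustrative): with voltage=0, current=2 and power=50 the
# function solves V = P / I and returns result(name='voltage', value=25.0).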
if __name__ == "__main__":
import doctest
doctest.testmod()
| 75 |
'''simple docstring'''
import random
from typing import Any
def a__ ( lowerCAmelCase__ ) -> list[Any]:
    # classic Fisher-Yates: walk the list backwards and swap each element with a
    # randomly chosen earlier (or same) position, yielding a uniform permutation
    for a in range(len(lowerCAmelCase__ ) - 1 , 0 , -1 ):
        UpperCAmelCase__ : int = random.randint(0 , a )
        UpperCAmelCase__ , UpperCAmelCase__ : List[str] = data[b], data[a]
return data
if __name__ == "__main__":
UpperCamelCase__ = [0, 1, 2, 3, 4, 5, 6, 7]
UpperCamelCase__ = ['''python''', '''says''', '''hello''', '''!''']
print('''Fisher-Yates Shuffle:''')
print('''List''', integers, strings)
print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 75 | 1 |
'''simple docstring'''
import os
import jsonlines
import numpy as np
from tqdm import tqdm
UpperCamelCase__ = 2_0_4_8
UpperCamelCase__ = 4_0_9_6
UpperCamelCase__ = 4_2
UpperCamelCase__ = os.environ.pop('''PROCESS_TRAIN''', '''false''')
UpperCamelCase__ = {'''null''': 0, '''short''': 1, '''long''': 2, '''yes''': 3, '''no''': 4}
def a__ ( lowerCAmelCase__ ) -> Optional[int]:
def choose_first(lowerCAmelCase__ , lowerCAmelCase__=False ):
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
if len(lowerCAmelCase__ ) == 1:
UpperCAmelCase__ : Optional[int] = answer[0]
return {k: [answer[k]] for k in answer} if is_long_answer else answer
for a in answer:
if is_long_answer:
UpperCAmelCase__ : Tuple = {k: [a[k]] for k in a}
if len(a['''start_token'''] ) > 0:
break
return a
UpperCAmelCase__ : List[Any] = {'''id''': example['''id''']}
UpperCAmelCase__ : int = example['''annotations''']
UpperCAmelCase__ : Any = annotation['''yes_no_answer''']
if 0 in yes_no_answer or 1 in yes_no_answer:
UpperCAmelCase__ : Union[str, Any] = ['''yes'''] if 1 in yes_no_answer else ['''no''']
UpperCAmelCase__ : int = []
UpperCAmelCase__ : Union[str, Any] = []
UpperCAmelCase__ : Dict = ['''<cls>''']
else:
UpperCAmelCase__ : Tuple = ['''short''']
UpperCAmelCase__ : Optional[Any] = choose_first(annotation['''short_answers'''] )
if len(out['''start_token'''] ) == 0:
# answer will be long if short is not available
UpperCAmelCase__ : Dict = ['''long''']
UpperCAmelCase__ : List[str] = choose_first(annotation['''long_answer'''] , is_long_answer=lowerCAmelCase__ )
UpperCAmelCase__ : str = []
answer.update(lowerCAmelCase__ )
# disregard some samples
if len(answer['''start_token'''] ) > 1 or answer["start_token"] == answer["end_token"]:
UpperCAmelCase__ : Optional[int] = True
else:
UpperCAmelCase__ : Any = False
UpperCAmelCase__ : str = ['''start_token''', '''end_token''', '''start_byte''', '''end_byte''', '''text''']
if not all(isinstance(answer[k] , lowerCAmelCase__ ) for k in cols ):
raise ValueError('''Issue in ID''' , example['''id'''] )
return answer
def a__ ( lowerCAmelCase__ , lowerCAmelCase__=False ) -> Optional[Any]:
UpperCAmelCase__ : Union[str, Any] = _get_single_answer(lowerCAmelCase__ )
    # byte offsets are not needed from here on
del answer["start_byte"]
del answer["end_byte"]
# handle yes_no answers explicitly
if answer["category"][0] in ["yes", "no"]: # category is list with one element
UpperCAmelCase__ : int = example['''document''']['''tokens''']
UpperCAmelCase__ : str = []
for i in range(len(doc['''token'''] ) ):
if not doc["is_html"][i]:
context.append(doc['''token'''][i] )
return {
"context": " ".join(lowerCAmelCase__ ),
"answer": {
"start_token": -1_00, # ignore index in cross-entropy
"end_token": -1_00, # ignore index in cross-entropy
"category": answer["category"],
"span": answer["category"], # extra
},
}
    # later, helps in removing all no-answer samples
if answer["start_token"] == [-1]:
return {
"context": "None",
"answer": {
"start_token": -1,
"end_token": -1,
"category": "null",
"span": "None", # extra
},
}
# handling normal samples
UpperCAmelCase__ : List[Any] = ['''start_token''', '''end_token''']
answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. [10] == 10
UpperCAmelCase__ : str = example['''document''']['''tokens''']
UpperCAmelCase__ : List[Any] = answer['''start_token''']
UpperCAmelCase__ : int = answer['''end_token''']
UpperCAmelCase__ : Union[str, Any] = []
for i in range(len(doc['''token'''] ) ):
if not doc["is_html"][i]:
context.append(doc['''token'''][i] )
else:
if answer["start_token"] > i:
start_token -= 1
if answer["end_token"] > i:
end_token -= 1
UpperCAmelCase__ : Any = ''' '''.join(context[start_token:end_token] )
# checking above code
if assertion:
UpperCAmelCase__ : Any = doc['''is_html'''][answer['''start_token'''] : answer['''end_token''']]
UpperCAmelCase__ : Union[str, Any] = doc['''token'''][answer['''start_token'''] : answer['''end_token''']]
UpperCAmelCase__ : str = ''' '''.join([old[i] for i in range(len(lowerCAmelCase__ ) ) if not is_html[i]] )
if new != old:
print('''ID:''' , example['''id'''] )
print('''New:''' , lowerCAmelCase__ , end='''\n''' )
print('''Old:''' , lowerCAmelCase__ , end='''\n\n''' )
return {
"context": " ".join(lowerCAmelCase__ ),
"answer": {
"start_token": start_token,
"end_token": end_token - 1, # this makes it inclusive
"category": answer["category"], # either long or short
"span": new, # extra
},
}
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=20_48 , lowerCAmelCase__=40_96 , lowerCAmelCase__=True ) -> Dict:
    # consecutive windows overlap by doc_stride - q_len tokens
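    # Worked example (module defaults, assuming e.g. q_len == 16): window starts
    # step by max_length - doc_stride = 4096 - 2048 = 2048 ids, each window holds
    # q_len question ids plus max_length - q_len context ids, so consecutive
    # windows share doc_stride - q_len = 2032 context ids.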
UpperCAmelCase__ : Any = get_context_and_ans(lowerCAmelCase__ , assertion=lowerCAmelCase__ )
UpperCAmelCase__ : Union[str, Any] = out['''answer''']
# later, removing these samples
if answer["start_token"] == -1:
return {
"example_id": example["id"],
"input_ids": [[-1]],
"labels": {
"start_token": [-1],
"end_token": [-1],
"category": ["null"],
},
}
UpperCAmelCase__ : str = tokenizer(example['''question''']['''text'''] , out['''context'''] ).input_ids
UpperCAmelCase__ : Optional[Any] = input_ids.index(tokenizer.sep_token_id ) + 1
# return yes/no
if answer["category"][0] in ["yes", "no"]: # category is list with one element
UpperCAmelCase__ : List[Any] = []
UpperCAmelCase__ : Optional[Any] = []
UpperCAmelCase__ : int = input_ids[:q_len]
UpperCAmelCase__ : Any = range(lowerCAmelCase__ , len(lowerCAmelCase__ ) , max_length - doc_stride )
for i in doc_start_indices:
UpperCAmelCase__ : Tuple = i + max_length - q_len
UpperCAmelCase__ : Optional[Any] = input_ids[i:end_index]
inputs.append(q_indices + slice )
category.append(answer['''category'''][0] )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": [-1_00] * len(lowerCAmelCase__ ),
"end_token": [-1_00] * len(lowerCAmelCase__ ),
"category": category,
},
}
UpperCAmelCase__ : Any = out['''context'''].split()
UpperCAmelCase__ : Any = splitted_context[answer['''end_token''']]
UpperCAmelCase__ : Dict = len(
tokenizer(
''' '''.join(splitted_context[: answer['''start_token''']] ) , add_special_tokens=lowerCAmelCase__ , ).input_ids )
UpperCAmelCase__ : Optional[Any] = len(
tokenizer(''' '''.join(splitted_context[: answer['''end_token''']] ) , add_special_tokens=lowerCAmelCase__ ).input_ids )
answer["start_token"] += q_len
answer["end_token"] += q_len
# fixing end token
UpperCAmelCase__ : Union[str, Any] = len(tokenizer(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ).input_ids )
if num_sub_tokens > 1:
answer["end_token"] += num_sub_tokens - 1
UpperCAmelCase__ : Dict = input_ids[answer['''start_token'''] : answer['''end_token'''] + 1] # right & left are inclusive
UpperCAmelCase__ : List[Any] = answer['''start_token''']
UpperCAmelCase__ : Any = answer['''end_token''']
if assertion:
UpperCAmelCase__ : List[Any] = tokenizer.decode(lowerCAmelCase__ )
if answer["span"] != new:
print('''ISSUE IN TOKENIZATION''' )
print('''OLD:''' , answer['''span'''] )
print('''NEW:''' , lowerCAmelCase__ , end='''\n\n''' )
if len(lowerCAmelCase__ ) <= max_length:
return {
"example_id": example["id"],
"input_ids": [input_ids],
"labels": {
"start_token": [answer["start_token"]],
"end_token": [answer["end_token"]],
"category": answer["category"],
},
}
UpperCAmelCase__ : Optional[Any] = input_ids[:q_len]
UpperCAmelCase__ : Any = range(lowerCAmelCase__ , len(lowerCAmelCase__ ) , max_length - doc_stride )
UpperCAmelCase__ : Any = []
UpperCAmelCase__ : Union[str, Any] = []
UpperCAmelCase__ : List[str] = []
UpperCAmelCase__ : Optional[int] = [] # null, yes, no, long, short
for i in doc_start_indices:
UpperCAmelCase__ : List[str] = i + max_length - q_len
UpperCAmelCase__ : Optional[int] = input_ids[i:end_index]
inputs.append(q_indices + slice )
assert len(inputs[-1] ) <= max_length, "Issue in truncating length"
if start_token >= i and end_token <= end_index - 1:
UpperCAmelCase__ : List[Any] = start_token - i + q_len
UpperCAmelCase__ : List[str] = end_token - i + q_len
answers_category.append(answer['''category'''][0] ) # ["short"] -> "short"
else:
UpperCAmelCase__ : Union[str, Any] = -1_00
UpperCAmelCase__ : Tuple = -1_00
answers_category.append('''null''' )
UpperCAmelCase__ : List[Any] = inputs[-1][start_token : end_token + 1]
answers_start_token.append(lowerCAmelCase__ )
answers_end_token.append(lowerCAmelCase__ )
if assertion:
if new != old and new != [tokenizer.cls_token_id]:
print('''ISSUE in strided for ID:''' , example['''id'''] )
print('''New:''' , tokenizer.decode(lowerCAmelCase__ ) )
print('''Old:''' , tokenizer.decode(lowerCAmelCase__ ) , end='''\n\n''' )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": answers_start_token,
"end_token": answers_end_token,
"category": answers_category,
},
}
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=20_48 , lowerCAmelCase__=40_96 , lowerCAmelCase__=False ) -> int:
UpperCAmelCase__ : Any = get_strided_contexts_and_ans(
lowerCAmelCase__ , lowerCAmelCase__ , doc_stride=lowerCAmelCase__ , max_length=lowerCAmelCase__ , assertion=lowerCAmelCase__ , )
return example
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
with jsonlines.open(lowerCAmelCase__ , '''a''' ) as writer:
for example in tqdm(lowerCAmelCase__ , total=len(lowerCAmelCase__ ) , desc='''Saving samples ... ''' ):
UpperCAmelCase__ : Dict = example['''labels''']
for ids, start, end, cat in zip(
example['''input_ids'''] , labels['''start_token'''] , labels['''end_token'''] , labels['''category'''] , ):
                if start == -1 and end == -1:
                    continue  # skip samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # drop ~60 % of the "null" samples
writer.write(
{
'''input_ids''': ids,
'''start_token''': start,
'''end_token''': end,
'''category''': CATEGORY_MAPPING[cat],
} )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
UpperCamelCase__ = load_dataset('''natural_questions''')
UpperCamelCase__ = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''')
UpperCamelCase__ = data['''train''' if PROCESS_TRAIN == '''true''' else '''validation''']
UpperCamelCase__ = {
'''tokenizer''': tokenizer,
'''doc_stride''': DOC_STRIDE,
'''max_length''': MAX_LENGTH,
'''assertion''': False,
}
UpperCamelCase__ = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
UpperCamelCase__ = data.remove_columns(['''annotations''', '''document''', '''id''', '''question'''])
print(data)
np.random.seed(SEED)
UpperCamelCase__ = '''nq-training.jsonl''' if PROCESS_TRAIN == '''true''' else '''nq-validation.jsonl'''
save_to_disk(data, file_name=cache_file_name)
| 75 |
'''simple docstring'''
import math
def a__ ( lowerCAmelCase__ ) -> list[int]:
UpperCAmelCase__ : List[Any] = []
UpperCAmelCase__ : Dict = 2
UpperCAmelCase__ : Optional[Any] = int(math.sqrt(lowerCAmelCase__ ) ) # Size of every segment
UpperCAmelCase__ : str = [True] * (end + 1)
UpperCAmelCase__ : Any = []
while start <= end:
if temp[start] is True:
in_prime.append(lowerCAmelCase__ )
for i in range(start * start , end + 1 , lowerCAmelCase__ ):
UpperCAmelCase__ : Dict = False
start += 1
prime += in_prime
UpperCAmelCase__ : Optional[int] = end + 1
UpperCAmelCase__ : str = min(2 * end , lowerCAmelCase__ )
while low <= n:
UpperCAmelCase__ : List[str] = [True] * (high - low + 1)
for each in in_prime:
UpperCAmelCase__ : List[str] = math.floor(low / each ) * each
if t < low:
t += each
for j in range(lowerCAmelCase__ , high + 1 , lowerCAmelCase__ ):
UpperCAmelCase__ : Union[str, Any] = False
for j in range(len(lowerCAmelCase__ ) ):
if temp[j] is True:
prime.append(j + low )
UpperCAmelCase__ : Union[str, Any] = high + 1
UpperCAmelCase__ : str = min(high + end , lowerCAmelCase__ )
return prime
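# e.g. sieve(30) returns [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]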
print(sieve(1_0**6))
| 75 | 1 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class lowerCamelCase_ :
def __init__( self : List[str] , _A : Dict , _A : Tuple=13 , _A : str=30 , _A : str=2 , _A : Optional[Any]=3 , _A : Any=True , _A : List[Any]=True , _A : Tuple=32 , _A : Tuple=2 , _A : Union[str, Any]=4 , _A : Optional[Any]=37 , _A : Any="gelu" , _A : Any=0.1 , _A : Dict=0.1 , _A : str=10 , _A : Optional[Any]=0.0_2 , _A : Any=3 , _A : Union[str, Any]=None , _A : List[str]=2 , ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = parent
UpperCAmelCase__ : Optional[int] = batch_size
UpperCAmelCase__ : str = image_size
UpperCAmelCase__ : str = patch_size
UpperCAmelCase__ : Optional[Any] = num_channels
UpperCAmelCase__ : Optional[Any] = is_training
UpperCAmelCase__ : List[str] = use_labels
UpperCAmelCase__ : Tuple = hidden_size
UpperCAmelCase__ : List[str] = num_hidden_layers
UpperCAmelCase__ : Union[str, Any] = num_attention_heads
UpperCAmelCase__ : Optional[int] = intermediate_size
UpperCAmelCase__ : Union[str, Any] = hidden_act
UpperCAmelCase__ : Union[str, Any] = hidden_dropout_prob
UpperCAmelCase__ : Tuple = attention_probs_dropout_prob
UpperCAmelCase__ : List[Any] = type_sequence_label_size
UpperCAmelCase__ : Optional[Any] = initializer_range
UpperCAmelCase__ : Any = scope
UpperCAmelCase__ : int = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
UpperCAmelCase__ : int = (image_size // patch_size) ** 2
UpperCAmelCase__ : List[Any] = num_patches + 2
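        # e.g. with the defaults above: (30 // 2) ** 2 = 225 patches, so the
        # sequence length is 225 + 2 = 227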
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : Tuple = None
if self.use_labels:
UpperCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : str = self.get_config()
return config, pixel_values, labels
def lowercase_ ( self : Dict ):
'''simple docstring'''
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def lowercase_ ( self : int , _A : Optional[Any] , _A : Union[str, Any] , _A : Any ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = TFDeiTModel(config=_A )
UpperCAmelCase__ : Dict = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self : Dict , _A : Tuple , _A : Union[str, Any] , _A : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Any = TFDeiTForMaskedImageModeling(config=_A )
UpperCAmelCase__ : int = model(_A )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase__ : Dict = 1
UpperCAmelCase__ : List[str] = TFDeiTForMaskedImageModeling(_A )
UpperCAmelCase__ : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase__ : Any = model(_A )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def lowercase_ ( self : List[Any] , _A : int , _A : int , _A : Any ):
'''simple docstring'''
UpperCAmelCase__ : int = self.type_sequence_label_size
UpperCAmelCase__ : Optional[int] = TFDeiTForImageClassification(_A )
UpperCAmelCase__ : Tuple = model(_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase__ : Dict = 1
UpperCAmelCase__ : int = TFDeiTForImageClassification(_A )
UpperCAmelCase__ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase__ : Optional[int] = model(_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : int = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Tuple = config_and_inputs
UpperCAmelCase__ : Tuple = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class lowerCamelCase_ ( __a , __a , unittest.TestCase ):
lowerCAmelCase__ = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
lowerCAmelCase__ = (
{
'feature-extraction': TFDeiTModel,
'image-classification': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Dict = TFDeiTModelTester(self )
UpperCAmelCase__ : Any = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 )
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''DeiT does not use inputs_embeds''' )
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
pass
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : List[str] = model_class(_A )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
UpperCAmelCase__ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_A , tf.keras.layers.Dense ) )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Dict = model_class(_A )
UpperCAmelCase__ : Tuple = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : Union[str, Any] = [*signature.parameters.keys()]
UpperCAmelCase__ : int = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _A )
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_A )
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
def lowercase_ ( self : List[str] , _A : int , _A : int , _A : Optional[int]=False ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = super()._prepare_for_class(_A , _A , return_labels=_A )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def lowercase_ ( self : Any ):
'''simple docstring'''
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : List[str] = TFDeiTModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def a__ ( ) -> Any:
UpperCAmelCase__ : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
@cached_property
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
return (
DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
if is_vision_available()
else None
)
@slow
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Dict = TFDeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
UpperCAmelCase__ : List[Any] = self.default_image_processor
UpperCAmelCase__ : Tuple = prepare_img()
UpperCAmelCase__ : Optional[int] = image_processor(images=_A , return_tensors='''tf''' )
# forward pass
UpperCAmelCase__ : List[Any] = model(**_A )
# verify the logits
UpperCAmelCase__ : Dict = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , _A )
UpperCAmelCase__ : Dict = tf.constant([-1.0_2_6_6, 0.1_9_1_2, -1.2_8_6_1] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , _A , atol=1e-4 ) )
| 75 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase_ ( __a , __a , __a , unittest.TestCase ):
lowerCAmelCase__ = StableDiffusionInpaintPipeline
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
lowerCAmelCase__ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowerCAmelCase__ = frozenset([] )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase__ : List[str] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_A , )
UpperCAmelCase__ : int = PNDMScheduler(skip_prk_steps=_A )
torch.manual_seed(0 )
UpperCAmelCase__ : str = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
UpperCAmelCase__ : Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='''gelu''' , projection_dim=512 , )
UpperCAmelCase__ : Union[str, Any] = CLIPTextModel(_A )
UpperCAmelCase__ : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
UpperCAmelCase__ : str = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def lowercase_ ( self : str , _A : Dict , _A : Any=0 ):
'''simple docstring'''
UpperCAmelCase__ : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
UpperCAmelCase__ : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase__ : Optional[int] = Image.fromarray(np.uinta(_A ) ).convert('''RGB''' ).resize((64, 64) )
UpperCAmelCase__ : int = Image.fromarray(np.uinta(image + 4 ) ).convert('''RGB''' ).resize((64, 64) )
if str(_A ).startswith('''mps''' ):
UpperCAmelCase__ : List[Any] = torch.manual_seed(_A )
else:
UpperCAmelCase__ : str = torch.Generator(device=_A ).manual_seed(_A )
UpperCAmelCase__ : Optional[int] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': init_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase__ : Tuple = self.get_dummy_components()
UpperCAmelCase__ : str = StableDiffusionInpaintPipeline(**_A )
UpperCAmelCase__ : List[str] = sd_pipe.to(_A )
sd_pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase__ : Dict = self.get_dummy_inputs(_A )
UpperCAmelCase__ : Any = sd_pipe(**_A ).images
UpperCAmelCase__ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase__ : int = np.array([0.4_7_2_7, 0.5_7_3_5, 0.3_9_4_1, 0.5_4_4_6, 0.5_9_2_6, 0.4_3_9_4, 0.5_0_6_2, 0.4_6_5_4, 0.4_4_7_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase_ ( self : Tuple ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
UpperCAmelCase__ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
UpperCAmelCase__ : List[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench.npy''' )
UpperCAmelCase__ : Dict = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCAmelCase__ : Optional[int] = StableDiffusionInpaintPipeline.from_pretrained(_A , safety_checker=_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
UpperCAmelCase__ : Tuple = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCAmelCase__ : str = torch.manual_seed(0 )
UpperCAmelCase__ : str = pipe(
prompt=_A , image=_A , mask_image=_A , generator=_A , output_type='''np''' , )
UpperCAmelCase__ : int = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9e-3
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
UpperCAmelCase__ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
UpperCAmelCase__ : Union[str, Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench_fp16.npy''' )
UpperCAmelCase__ : Tuple = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCAmelCase__ : Any = StableDiffusionInpaintPipeline.from_pretrained(
_A , torch_dtype=torch.floataa , safety_checker=_A , )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
UpperCAmelCase__ : Tuple = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCAmelCase__ : Union[str, Any] = torch.manual_seed(0 )
UpperCAmelCase__ : Optional[Any] = pipe(
prompt=_A , image=_A , mask_image=_A , generator=_A , output_type='''np''' , )
UpperCAmelCase__ : Tuple = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def lowercase_ ( self : Any ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCAmelCase__ : Union[str, Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
UpperCAmelCase__ : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
UpperCAmelCase__ : Optional[Any] = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCAmelCase__ : str = PNDMScheduler.from_pretrained(_A , subfolder='''scheduler''' )
UpperCAmelCase__ : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(
_A , safety_checker=_A , scheduler=_A , torch_dtype=torch.floataa , )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
UpperCAmelCase__ : Optional[int] = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCAmelCase__ : Tuple = torch.manual_seed(0 )
UpperCAmelCase__ : Any = pipe(
prompt=_A , image=_A , mask_image=_A , generator=_A , num_inference_steps=2 , output_type='''np''' , )
UpperCAmelCase__ : int = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.6_5 * 10**9
| 75 | 1 |
'''simple docstring'''
def a__ ( lowerCAmelCase__ ) -> list:
for i in range(len(lowerCAmelCase__ ) - 1 , 0 , -1 ):
UpperCAmelCase__ : Optional[int] = False
for j in range(lowerCAmelCase__ , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
UpperCAmelCase__ , UpperCAmelCase__ : Dict = unsorted[j - 1], unsorted[j]
UpperCAmelCase__ : List[str] = True
for j in range(lowerCAmelCase__ ):
if unsorted[j] > unsorted[j + 1]:
UpperCAmelCase__ , UpperCAmelCase__ : int = unsorted[j + 1], unsorted[j]
UpperCAmelCase__ : Dict = True
if not swapped:
break
return unsorted
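# e.g. cocktail_shaker_sort([4, 5, 2, 1, 2]) returns [1, 2, 2, 4, 5]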
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase__ = input('''Enter numbers separated by a comma:\n''').strip()
UpperCamelCase__ = [int(item) for item in user_input.split(''',''')]
print(F"""{cocktail_shaker_sort(unsorted) = }""")
| 75 |
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
    # The slow tests often fail with OOM errors on GPU.
    # This makes JAX allocate exactly what is needed on demand and deallocate memory that is no longer
    # needed, but it will be slower, as stated here: https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCamelCase__ = '''platform'''
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , ) -> Tuple:
if attention_mask is None:
UpperCAmelCase__ : List[Any] = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
UpperCAmelCase__ : Union[str, Any] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
UpperCAmelCase__ : Optional[Any] = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCAmelCase__ : Optional[int] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
UpperCAmelCase__ : Any = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class lowerCamelCase_ :
def __init__( self : Optional[Any] , _A : Optional[Any] , _A : str=13 , _A : int=7 , _A : Any=True , _A : List[Any]=False , _A : Optional[int]=99 , _A : Optional[int]=16 , _A : int=2 , _A : Optional[int]=4 , _A : Optional[int]=4 , _A : int="gelu" , _A : List[str]=0.1 , _A : str=0.1 , _A : int=32 , _A : Optional[int]=2 , _A : int=1 , _A : Dict=0 , _A : Dict=0.0_2 , ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = parent
UpperCAmelCase__ : str = batch_size
UpperCAmelCase__ : Dict = seq_length
UpperCAmelCase__ : str = is_training
UpperCAmelCase__ : int = use_labels
UpperCAmelCase__ : Union[str, Any] = vocab_size
UpperCAmelCase__ : Union[str, Any] = hidden_size
UpperCAmelCase__ : int = num_hidden_layers
UpperCAmelCase__ : Any = num_attention_heads
UpperCAmelCase__ : List[str] = intermediate_size
UpperCAmelCase__ : str = hidden_act
UpperCAmelCase__ : str = hidden_dropout_prob
UpperCAmelCase__ : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase__ : Union[str, Any] = max_position_embeddings
UpperCAmelCase__ : int = eos_token_id
UpperCAmelCase__ : Optional[int] = pad_token_id
UpperCAmelCase__ : List[str] = bos_token_id
UpperCAmelCase__ : Union[str, Any] = initializer_range
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Any = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
UpperCAmelCase__ : str = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
UpperCAmelCase__ : List[Any] = shift_tokens_right(_A , 1 , 2 )
UpperCAmelCase__ : List[Any] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_A , )
UpperCAmelCase__ : Tuple = prepare_blenderbot_inputs_dict(_A , _A , _A )
return config, inputs_dict
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.prepare_config_and_inputs()
return config, inputs_dict
def lowercase_ ( self : int , _A : List[Any] , _A : Optional[Any] , _A : int ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = 20
UpperCAmelCase__ : int = model_class_name(_A )
UpperCAmelCase__ : str = model.encode(inputs_dict['''input_ids'''] )
UpperCAmelCase__ , UpperCAmelCase__ : Dict = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
UpperCAmelCase__ : Tuple = model.init_cache(decoder_input_ids.shape[0] , _A , _A )
UpperCAmelCase__ : Optional[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
UpperCAmelCase__ : str = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
UpperCAmelCase__ : str = model.decode(
decoder_input_ids[:, :-1] , _A , decoder_attention_mask=_A , past_key_values=_A , decoder_position_ids=_A , )
UpperCAmelCase__ : Dict = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
UpperCAmelCase__ : Tuple = model.decode(
decoder_input_ids[:, -1:] , _A , decoder_attention_mask=_A , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_A , )
UpperCAmelCase__ : int = model.decode(_A , _A )
UpperCAmelCase__ : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def lowercase_ ( self : Tuple , _A : List[Any] , _A : Tuple , _A : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = 20
UpperCAmelCase__ : Optional[int] = model_class_name(_A )
UpperCAmelCase__ : Optional[int] = model.encode(inputs_dict['''input_ids'''] )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
UpperCAmelCase__ : Any = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
UpperCAmelCase__ : List[Any] = model.init_cache(decoder_input_ids.shape[0] , _A , _A )
UpperCAmelCase__ : Optional[Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
UpperCAmelCase__ : int = model.decode(
decoder_input_ids[:, :-1] , _A , decoder_attention_mask=_A , past_key_values=_A , decoder_position_ids=_A , )
UpperCAmelCase__ : List[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
UpperCAmelCase__ : Any = model.decode(
decoder_input_ids[:, -1:] , _A , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_A , decoder_position_ids=_A , )
UpperCAmelCase__ : List[str] = model.decode(_A , _A , decoder_attention_mask=_A )
UpperCAmelCase__ : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class lowerCamelCase_ ( unittest.TestCase ):
lowerCAmelCase__ = 9_9
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
UpperCAmelCase__ : int = input_ids.shape[0]
UpperCAmelCase__ : List[str] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self._get_config_and_data()
UpperCAmelCase__ : Any = FlaxBlenderbotForConditionalGeneration(_A )
UpperCAmelCase__ : Optional[int] = lm_model(input_ids=_A )
UpperCAmelCase__ : Dict = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , _A )
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
UpperCAmelCase__ : Union[str, Any] = FlaxBlenderbotForConditionalGeneration(_A )
UpperCAmelCase__ : str = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
UpperCAmelCase__ : Any = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
UpperCAmelCase__ : Tuple = lm_model(input_ids=_A , decoder_input_ids=_A )
UpperCAmelCase__ : int = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , _A )
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Any = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.int32 )
UpperCAmelCase__ : Union[str, Any] = shift_tokens_right(_A , 1 , 2 )
UpperCAmelCase__ : str = np.equal(_A , 1 ).astype(np.float32 ).sum()
UpperCAmelCase__ : Dict = np.equal(_A , 1 ).astype(np.float32 ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(_A , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
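# Reference sketch of the shift_tokens_right behaviour exercised by the test above.
# This is an assumption-flagged standalone version mirroring the usual Flax seq2seq
# helper, not necessarily the exact library implementation: shift everything one
# position right, put the decoder start token at position 0, and map any -100 label
# filler back to the pad token.
def shift_tokens_right_sketch(input_ids: np.ndarray, pad_token_id: int, decoder_start_token_id: int) -> np.ndarray:
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]  # the last token falls off the end
    shifted[:, 0] = decoder_start_token_id
    return np.where(shifted == -100, pad_token_id, shifted)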
@require_flax
class lowerCamelCase_ ( __a , unittest.TestCase , __a ):
lowerCAmelCase__ = True
lowerCAmelCase__ = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
lowerCAmelCase__ = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Dict = FlaxBlenderbotModelTester(self )
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_A , _A , _A )
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_A , _A , _A )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase__ : Dict = self._prepare_for_class(_A , _A )
UpperCAmelCase__ : str = model_class(_A )
@jax.jit
def encode_jitted(_A : Any , _A : Tuple=None , **_A : Optional[int] ):
return model.encode(input_ids=_A , attention_mask=_A )
with self.subTest('''JIT Enabled''' ):
UpperCAmelCase__ : Optional[Any] = encode_jitted(**_A ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
UpperCAmelCase__ : Tuple = encode_jitted(**_A ).to_tuple()
self.assertEqual(len(_A ) , len(_A ) )
for jitted_output, output in zip(_A , _A ):
self.assertEqual(jitted_output.shape , output.shape )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase__ : List[str] = model_class(_A )
UpperCAmelCase__ : Tuple = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
UpperCAmelCase__ : Tuple = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(_A : Optional[int] , _A : List[Any] , _A : int ):
return model.decode(
decoder_input_ids=_A , decoder_attention_mask=_A , encoder_outputs=_A , )
with self.subTest('''JIT Enabled''' ):
UpperCAmelCase__ : Any = decode_jitted(**_A ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
UpperCAmelCase__ : Optional[int] = decode_jitted(**_A ).to_tuple()
self.assertEqual(len(_A ) , len(_A ) )
for jitted_output, output in zip(_A , _A ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowercase_ ( self : List[str] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
UpperCAmelCase__ : Union[str, Any] = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
UpperCAmelCase__ : Tuple = np.ones((1, 1) ) * model.config.eos_token_id
UpperCAmelCase__ : Union[str, Any] = model(_A )
self.assertIsNotNone(_A )
@unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' )
@slow
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 15, '''max_length''': 25}
UpperCAmelCase__ : int = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True}
UpperCAmelCase__ : str = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=_A )
UpperCAmelCase__ : Optional[Any] = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' )
UpperCAmelCase__ : Optional[Any] = ['''Sam''']
UpperCAmelCase__ : Dict = tokenizer(_A , return_tensors='''jax''' )
UpperCAmelCase__ : List[str] = model.generate(**_A , **_A )
UpperCAmelCase__ : Dict = '''Sam is a great name. It means "sun" in Gaelic.'''
UpperCAmelCase__ : Any = tokenizer.batch_decode(_A , **_A )
assert generated_txt[0].strip() == tgt_text
| 75 | 1 |
'''simple docstring'''
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--original_config_file''',
type=str,
required=True,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--image_size''',
default=5_1_2,
type=int,
help=(
'''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
def a__ ( lowerCAmelCase__ ) -> Any:
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(F"""could not parse string as bool {string}""" )
parser.add_argument(
'''--use_linear_projection''', help='''Override for use linear projection''', required=False, type=parse_bool
)
parser.add_argument('''--cross_attention_dim''', help='''Override for cross attention_dim''', required=False, type=int)
UpperCamelCase__ = parser.parse_args()
UpperCamelCase__ = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
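# Example invocation (hypothetical file names; the flags are exactly those defined above):
#   python convert_original_controlnet_to_diffusers.py \
#       --checkpoint_path ./control_sd15_canny.pth \
#       --original_config_file ./cldm_v15.yaml \
#       --dump_path ./controlnet-canny \
#       --to_safetensors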
| 75 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class lowerCamelCase_ ( datasets.BeamBasedBuilder ):
def lowercase_ ( self : str ):
'''simple docstring'''
return datasets.DatasetInfo(
features=datasets.Features({'''content''': datasets.Value('''string''' )} ) , supervised_keys=_A , )
def lowercase_ ( self : int , _A : Optional[int] , _A : Optional[Any] ):
'''simple docstring'''
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_dummy_examples()} )]
def lowercase_ ( self : Union[str, Any] , _A : str , _A : Union[str, Any] ):
'''simple docstring'''
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(_A )
class lowerCamelCase_ ( datasets.BeamBasedBuilder ):
def lowercase_ ( self : Any ):
'''simple docstring'''
return datasets.DatasetInfo(
features=datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) , supervised_keys=_A , )
def lowercase_ ( self : Any , _A : List[str] , _A : Any ):
'''simple docstring'''
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_nested_examples()} )
]
def lowercase_ ( self : List[str] , _A : Optional[int] , _A : Tuple ):
'''simple docstring'''
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(_A )
def a__ ( ) -> Tuple:
return [(i, {"content": content}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )]
def a__ ( ) -> Optional[Any]:
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )]
class lowerCamelCase_ ( __a ):
@require_beam
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCAmelCase__ : Any = DummyBeamDataset(cache_dir=_A , beam_runner='''DirectRunner''' )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(_A , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train.arrow""" ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) )
UpperCAmelCase__ : Union[str, Any] = builder.as_dataset()
self.assertEqual(dset['''train'''].num_rows , _A )
self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , _A )
self.assertDictEqual(dset['''train'''][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset['''train'''][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(_A , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
del dset
@require_beam
def lowercase_ ( self : Any ):
'''simple docstring'''
import apache_beam as beam
UpperCAmelCase__ : List[str] = beam.io.parquetio.WriteToParquet
UpperCAmelCase__ : int = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCAmelCase__ : Optional[int] = DummyBeamDataset(cache_dir=_A , beam_runner='''DirectRunner''' )
with patch('''apache_beam.io.parquetio.WriteToParquet''' ) as write_parquet_mock:
UpperCAmelCase__ : Dict = partial(_A , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
_A , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
_A , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train-00001-of-00002.arrow""" ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) )
UpperCAmelCase__ : Tuple = builder.as_dataset()
self.assertEqual(dset['''train'''].num_rows , _A )
self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , _A )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset['''train''']['''content'''] ) , sorted(['''foo''', '''bar''', '''foobar'''] ) )
self.assertTrue(
os.path.exists(os.path.join(_A , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
del dset
@require_beam
def lowercase_ ( self : int ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCAmelCase__ : Tuple = DummyBeamDataset(cache_dir=_A )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCAmelCase__ : int = NestedBeamDataset(cache_dir=_A , beam_runner='''DirectRunner''' )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(_A , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train.arrow""" ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) )
UpperCAmelCase__ : Optional[int] = builder.as_dataset()
self.assertEqual(dset['''train'''].num_rows , _A )
self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , _A )
self.assertDictEqual(dset['''train'''][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset['''train'''][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(_A , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
del dset
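# Minimal usage sketch outside the test harness (assumes Apache Beam is installed;
# the tests above construct the DummyBeamDataset builder from the top of this file
# the same way):
#   builder = DummyBeamDataset(cache_dir="/tmp/beam_cache", beam_runner="DirectRunner")
#   builder.download_and_prepare()
#   dset = builder.as_dataset()
#   dset["train"][0]  # -> {"content": "foo"}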
| 75 | 1 |
'''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCamelCase__ = logging.get_logger(__name__)
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = ['pixel_values']
def __init__( self : List[str] , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : bool = True , _A : Dict[str, int] = None , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , _A : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **_A : int , ):
'''simple docstring'''
super().__init__(**_A )
UpperCAmelCase__ : Dict = size if size is not None else {'''shortest_edge''': 224}
UpperCAmelCase__ : int = get_size_dict(_A , default_to_square=_A )
UpperCAmelCase__ : Union[str, Any] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
UpperCAmelCase__ : List[str] = get_size_dict(_A , param_name='''crop_size''' )
UpperCAmelCase__ : str = do_resize
UpperCAmelCase__ : List[Any] = size
UpperCAmelCase__ : int = resample
UpperCAmelCase__ : int = do_center_crop
UpperCAmelCase__ : List[str] = crop_size
UpperCAmelCase__ : Union[str, Any] = do_rescale
UpperCAmelCase__ : Optional[int] = rescale_factor
UpperCAmelCase__ : List[Any] = do_normalize
UpperCAmelCase__ : Dict = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
UpperCAmelCase__ : Dict = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def lowercase_ ( self : str , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = get_size_dict(_A , default_to_square=_A )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
UpperCAmelCase__ : Tuple = int((256 / 224) * size['''shortest_edge'''] )
UpperCAmelCase__ : Tuple = get_resize_output_image_size(_A , size=_A , default_to_square=_A )
UpperCAmelCase__ : Dict = {'''height''': output_size[0], '''width''': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" )
return resize(
_A , size=(size_dict['''height'''], size_dict['''width''']) , resample=_A , data_format=_A , **_A )
def lowercase_ ( self : Optional[Any] , _A : np.ndarray , _A : Dict[str, int] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Optional[int] , ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(_A , size=(size['''height'''], size['''width''']) , data_format=_A , **_A )
def lowercase_ ( self : List[str] , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Dict , ):
'''simple docstring'''
return rescale(_A , scale=_A , data_format=_A , **_A )
def lowercase_ ( self : Dict , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Optional[int] , ):
'''simple docstring'''
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def lowercase_ ( self : Optional[Any] , _A : ImageInput , _A : Optional[bool] = None , _A : Optional[Dict[str, int]] = None , _A : PILImageResampling = None , _A : Optional[bool] = None , _A : Optional[Dict[str, int]] = None , _A : Optional[bool] = None , _A : Optional[float] = None , _A : Optional[bool] = None , _A : Optional[Union[float, Iterable[float]]] = None , _A : Optional[Union[float, Iterable[float]]] = None , _A : Optional[TensorType] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : str , ):
'''simple docstring'''
UpperCAmelCase__ : str = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase__ : Optional[int] = resample if resample is not None else self.resample
UpperCAmelCase__ : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase__ : Tuple = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase__ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase__ : List[str] = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase__ : Tuple = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase__ : List[str] = image_std if image_std is not None else self.image_std
UpperCAmelCase__ : Tuple = size if size is not None else self.size
UpperCAmelCase__ : int = get_size_dict(_A , default_to_square=_A )
UpperCAmelCase__ : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase__ : int = get_size_dict(_A , param_name='''crop_size''' )
UpperCAmelCase__ : Union[str, Any] = make_list_of_images(_A )
if not valid_images(_A ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
UpperCAmelCase__ : int = [to_numpy_array(_A ) for image in images]
if do_resize:
UpperCAmelCase__ : str = [self.resize(_A , _A , _A ) for image in images]
if do_center_crop:
UpperCAmelCase__ : Tuple = [self.center_crop(_A , _A ) for image in images]
if do_rescale:
UpperCAmelCase__ : Optional[int] = [self.rescale(_A , _A ) for image in images]
if do_normalize:
UpperCAmelCase__ : Any = [self.normalize(_A , _A , _A ) for image in images]
UpperCAmelCase__ : Tuple = [to_channel_dimension_format(_A , _A ) for image in images]
UpperCAmelCase__ : Dict = {'''pixel_values''': images}
return BatchFeature(data=_A , tensor_type=_A )
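# The resize step above scales the image so its shortest edge becomes
# (256 / 224) * shortest_edge before the final center crop (the classic ImageNet
# evaluation recipe: resize to 256, crop to 224). A standalone sketch of that size
# computation; the helper name is illustrative, not part of the class above:
def shortest_edge_resize_size(height: int, width: int, shortest_edge: int = 224) -> tuple:
    target = int((256 / 224) * shortest_edge)
    if height <= width:
        return target, int(round(width * target / height))
    return int(round(height * target / width)), target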
| 75 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
UpperCamelCase__ = '''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'''
def a__ ( ) -> List[str]:
UpperCAmelCase__ : Optional[int] = _ask_options(
'''In which compute environment are you running?''' , ['''This machine''', '''AWS (Amazon SageMaker)'''] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
UpperCAmelCase__ : Any = get_sagemaker_input()
else:
UpperCAmelCase__ : List[str] = get_cluster_input()
return config
def a__ ( lowerCAmelCase__=None ) -> List[Any]:
if subparsers is not None:
UpperCAmelCase__ : Union[str, Any] = subparsers.add_parser('''config''' , description=lowerCAmelCase__ )
else:
UpperCAmelCase__ : Dict = argparse.ArgumentParser('''Accelerate config command''' , description=lowerCAmelCase__ )
parser.add_argument(
'''--config_file''' , default=lowerCAmelCase__ , help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) , )
if subparsers is not None:
parser.set_defaults(func=lowerCAmelCase__ )
return parser
def a__ ( lowerCAmelCase__ ) -> List[Any]:
UpperCAmelCase__ : List[Any] = get_user_input()
if args.config_file is not None:
UpperCAmelCase__ : Any = args.config_file
else:
if not os.path.isdir(lowerCAmelCase__ ):
os.makedirs(lowerCAmelCase__ )
UpperCAmelCase__ : int = default_yaml_config_file
if config_file.endswith('''.json''' ):
config.to_json_file(lowerCAmelCase__ )
else:
config.to_yaml_file(lowerCAmelCase__ )
print(F"""accelerate configuration saved at {config_file}""" )
def a__ ( ) -> str:
UpperCAmelCase__ : Optional[int] = config_command_parser()
UpperCAmelCase__ : Any = parser.parse_args()
config_command(lowerCAmelCase__ )
if __name__ == "__main__":
main()
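# Typical invocation: running `accelerate config` walks through the prompts above and
# writes default_config.yaml under the HF cache; passing `--config_file path.json`
# switches the output to JSON, matching the .json branch handled above.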
| 75 | 1 |
'''simple docstring'''
def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[row_length - tile_start - tile_length]
    return ways_number[length]
if __name__ == "__main__":
    print(F"""{solution() = }""")
| 75 |
'''simple docstring'''
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path) -> None:
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)
    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
    print(F"""Save PyTorch model to {pytorch_weights_dump_path}""")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(F"""Save configuration file to {pytorch_config_dump_path}""")
    with open(pytorch_config_dump_path, '''w''', encoding='''utf-8''') as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--gpt2_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--gpt2_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
'''This specifies the model architecture.'''
),
)
UpperCamelCase__ = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
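# Example invocation (hypothetical paths pointing at an OpenAI-style TF checkpoint):
#   python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#       --gpt2_checkpoint_path ./models/117M/model.ckpt \
#       --pytorch_dump_folder_path ./gpt2-pytorch \
#       --gpt2_config_file ./models/117M/hparams.json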
| 75 | 1 |
'''simple docstring'''
from typing import Any
class lowerCamelCase_ :
def __init__( self : Union[str, Any] , _A : Any ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = data
UpperCAmelCase__ : str = None
class lowerCamelCase_ :
def __init__( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = None
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Any = self.head
while temp is not None:
print(temp.data , end=''' ''' )
UpperCAmelCase__ : List[str] = temp.next
print()
def lowercase_ ( self : List[str] , _A : Any ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = Node(_A )
UpperCAmelCase__ : Union[str, Any] = self.head
UpperCAmelCase__ : Union[str, Any] = new_node
def lowercase_ ( self : List[str] , _A : str , _A : str ):
'''simple docstring'''
if node_data_a == node_data_a:
return
else:
UpperCAmelCase__ : Optional[int] = self.head
while node_a is not None and node_a.data != node_data_a:
UpperCAmelCase__ : Optional[Any] = node_a.next
UpperCAmelCase__ : str = self.head
while node_a is not None and node_a.data != node_data_a:
UpperCAmelCase__ : List[str] = node_a.next
if node_a is None or node_a is None:
return
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = node_a.data, node_a.data
if __name__ == "__main__":
UpperCamelCase__ = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('''After swapping''')
ll.print_list()
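# Expected output of the demo above, assuming the classes implement the standard
# data-swap linked list (pushing 5..1 builds the list 1 -> 2 -> 3 -> 4 -> 5):
#   1 2 3 4 5
#   After swapping
#   4 2 3 1 5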
| 75 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCamelCase_ :
def __init__( self : Optional[int] , _A : Optional[Any] , _A : Tuple=2 , _A : Tuple=3 , _A : Optional[Any]=4 , _A : List[Any]=2 , _A : List[Any]=7 , _A : int=True , _A : Dict=True , _A : int=True , _A : Dict=True , _A : Tuple=99 , _A : Union[str, Any]=36 , _A : int=2 , _A : List[str]=4 , _A : int=37 , _A : List[Any]="gelu" , _A : str=0.1 , _A : str=0.1 , _A : Tuple=512 , _A : Dict=16 , _A : Tuple=2 , _A : Union[str, Any]=0.0_2 , _A : Any=6 , _A : Union[str, Any]=6 , _A : str=3 , _A : str=4 , _A : Tuple=None , _A : int=1_000 , ):
'''simple docstring'''
UpperCAmelCase__ : int = parent
UpperCAmelCase__ : Optional[int] = batch_size
UpperCAmelCase__ : str = num_channels
UpperCAmelCase__ : str = image_size
UpperCAmelCase__ : List[str] = patch_size
UpperCAmelCase__ : Any = is_training
UpperCAmelCase__ : List[str] = use_input_mask
UpperCAmelCase__ : Tuple = use_token_type_ids
UpperCAmelCase__ : str = use_labels
UpperCAmelCase__ : int = vocab_size
UpperCAmelCase__ : List[Any] = hidden_size
UpperCAmelCase__ : Optional[int] = num_hidden_layers
UpperCAmelCase__ : List[str] = num_attention_heads
UpperCAmelCase__ : Tuple = intermediate_size
UpperCAmelCase__ : Dict = hidden_act
UpperCAmelCase__ : int = hidden_dropout_prob
UpperCAmelCase__ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase__ : List[str] = max_position_embeddings
UpperCAmelCase__ : Tuple = type_vocab_size
UpperCAmelCase__ : Any = type_sequence_label_size
UpperCAmelCase__ : List[str] = initializer_range
UpperCAmelCase__ : List[str] = coordinate_size
UpperCAmelCase__ : Tuple = shape_size
UpperCAmelCase__ : Optional[int] = num_labels
UpperCAmelCase__ : Optional[Any] = num_choices
UpperCAmelCase__ : Union[str, Any] = scope
UpperCAmelCase__ : Optional[Any] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
UpperCAmelCase__ : str = text_seq_length
UpperCAmelCase__ : Tuple = (image_size // patch_size) ** 2 + 1
UpperCAmelCase__ : Tuple = self.text_seq_length + self.image_seq_length
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
UpperCAmelCase__ : int = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
UpperCAmelCase__ : str = bbox[i, j, 3]
UpperCAmelCase__ : Dict = bbox[i, j, 1]
UpperCAmelCase__ : str = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
UpperCAmelCase__ : Optional[int] = bbox[i, j, 2]
UpperCAmelCase__ : Any = bbox[i, j, 0]
UpperCAmelCase__ : List[Any] = tmp_coordinate
UpperCAmelCase__ : str = tf.constant(_A )
UpperCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : Any = None
if self.use_input_mask:
UpperCAmelCase__ : Any = random_attention_mask([self.batch_size, self.text_seq_length] )
UpperCAmelCase__ : Any = None
if self.use_token_type_ids:
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
UpperCAmelCase__ : Optional[int] = None
UpperCAmelCase__ : List[str] = None
if self.use_labels:
UpperCAmelCase__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
UpperCAmelCase__ : Optional[int] = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def lowercase_ ( self : Union[str, Any] , _A : int , _A : str , _A : Optional[int] , _A : Optional[int] , _A : List[str] , _A : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = TFLayoutLMvaModel(config=_A )
# text + image
UpperCAmelCase__ : Tuple = model(_A , pixel_values=_A , training=_A )
UpperCAmelCase__ : Tuple = model(
_A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , training=_A , )
UpperCAmelCase__ : Optional[Any] = model(_A , bbox=_A , pixel_values=_A , training=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
UpperCAmelCase__ : Any = model(_A , training=_A )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
UpperCAmelCase__ : str = model({'''pixel_values''': pixel_values} , training=_A )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def lowercase_ ( self : Union[str, Any] , _A : Optional[int] , _A : Optional[Any] , _A : Dict , _A : List[Any] , _A : List[Any] , _A : Any , _A : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.num_labels
UpperCAmelCase__ : int = TFLayoutLMvaForSequenceClassification(config=_A )
UpperCAmelCase__ : Union[str, Any] = model(
_A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , labels=_A , training=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ ( self : Dict , _A : List[Any] , _A : Any , _A : Dict , _A : str , _A : Optional[int] , _A : str , _A : str ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.num_labels
UpperCAmelCase__ : Union[str, Any] = TFLayoutLMvaForTokenClassification(config=_A )
UpperCAmelCase__ : Optional[int] = model(
_A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , labels=_A , training=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def lowercase_ ( self : Dict , _A : Dict , _A : List[str] , _A : Union[str, Any] , _A : int , _A : Tuple , _A : Dict , _A : str ):
'''simple docstring'''
UpperCAmelCase__ : str = 2
UpperCAmelCase__ : Dict = TFLayoutLMvaForQuestionAnswering(config=_A )
UpperCAmelCase__ : str = model(
_A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , training=_A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : int = self.prepare_config_and_inputs()
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : List[str] = config_and_inputs
UpperCAmelCase__ : List[Any] = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''pixel_values''': pixel_values,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_tf
class lowerCamelCase_ ( __a , __a , unittest.TestCase ):
lowerCAmelCase__ = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase__ = (
{'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def lowercase_ ( self : List[Any] , _A : Union[str, Any] , _A : str , _A : List[Any] , _A : Dict , _A : List[str] ):
'''simple docstring'''
return True
def lowercase_ ( self : Optional[Any] , _A : Tuple , _A : Any , _A : Dict=False ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = copy.deepcopy(_A )
if model_class in get_values(_A ):
UpperCAmelCase__ : Tuple = {
k: tf.tile(tf.expand_dims(_A , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(_A , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(_A ):
UpperCAmelCase__ : Dict = tf.ones(self.model_tester.batch_size , dtype=tf.int32 )
elif model_class in get_values(_A ):
UpperCAmelCase__ : Tuple = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
UpperCAmelCase__ : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
elif model_class in get_values(_A ):
UpperCAmelCase__ : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
elif model_class in get_values(_A ):
UpperCAmelCase__ : int = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.int32 )
return inputs_dict
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Any = TFLayoutLMvaModelTester(self )
UpperCAmelCase__ : Tuple = ConfigTester(self , config_class=_A , hidden_size=37 )
def lowercase_ ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Optional[Any] = model_class(_A )
if getattr(_A , '''hf_compute_loss''' , _A ):
# The number of elements in the loss should be the same as the number of elements in the label
UpperCAmelCase__ : Tuple = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
UpperCAmelCase__ : List[Any] = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=_A )[0]
]
UpperCAmelCase__ : Optional[Any] = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
UpperCAmelCase__ : Any = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
UpperCAmelCase__ : Tuple = prepared_for_class.pop('''input_ids''' )
UpperCAmelCase__ : List[Any] = model(_A , **_A )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
UpperCAmelCase__ : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
UpperCAmelCase__ : Tuple = prepared_for_class.pop('''input_ids''' )
if "labels" in prepared_for_class:
UpperCAmelCase__ : Optional[Any] = prepared_for_class['''labels'''].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
UpperCAmelCase__ : Any = -100
UpperCAmelCase__ : Union[str, Any] = tf.convert_to_tensor(_A )
UpperCAmelCase__ : int = model(_A , **_A )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
UpperCAmelCase__ : Optional[int] = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
UpperCAmelCase__ : Dict = model(_A )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
UpperCAmelCase__ : Dict = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
# Get keys that were added with the _prepare_for_class function
UpperCAmelCase__ : Optional[int] = prepared_for_class.keys() - inputs_dict.keys()
UpperCAmelCase__ : int = inspect.signature(model.call ).parameters
UpperCAmelCase__ : Union[str, Any] = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
UpperCAmelCase__ : Dict = {0: '''input_ids'''}
for label_key in label_keys:
UpperCAmelCase__ : str = signature_names.index(_A )
UpperCAmelCase__ : List[Any] = label_key
UpperCAmelCase__ : Dict = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
UpperCAmelCase__ : Tuple = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
UpperCAmelCase__ : Any = prepared_for_class[value]
UpperCAmelCase__ : Tuple = tuple(_A )
# Send to model
UpperCAmelCase__ : Optional[Any] = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def lowercase_ ( self : int ):
'''simple docstring'''
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(_A , _A , _A , _A , _A , _A )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase__ : Union[str, Any] = type
self.model_tester.create_and_check_model(_A , _A , _A , _A , _A , _A )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
_A , _A , _A , _A , _A , _A , _A )
def lowercase_ ( self : Any ):
'''simple docstring'''
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
_A , _A , _A , _A , _A , _A , _A )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
_A , _A , _A , _A , _A , _A , _A )
@slow
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : List[str] = TFLayoutLMvaModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def a__ ( ) -> List[str]:
UpperCAmelCase__ : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
class lowerCamelCase_ ( unittest.TestCase ):
@cached_property
def lowercase_ ( self : Dict ):
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=_A ) if is_vision_available() else None
@slow
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : str = TFLayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' )
UpperCAmelCase__ : Dict = self.default_image_processor
UpperCAmelCase__ : Any = prepare_img()
UpperCAmelCase__ : int = image_processor(images=_A , return_tensors='''tf''' ).pixel_values
UpperCAmelCase__ : str = tf.constant([[1, 2]] )
UpperCAmelCase__ : Optional[Any] = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
UpperCAmelCase__ : int = model(input_ids=_A , bbox=_A , pixel_values=_A , training=_A )
# verify the logits
UpperCAmelCase__ : Optional[int] = (1, 199, 768)
self.assertEqual(outputs.last_hidden_state.shape , _A )
UpperCAmelCase__ : Dict = tf.constant(
[[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _A , atol=1e-4 ) )
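# The per-element loop in the model tester above legalises random boxes by swapping
# coordinates so that x0 <= x1 and y0 <= y1. A vectorised NumPy equivalent, for
# illustration only (assumes bbox has shape (batch, seq_len, 4) holding x0, y0, x1, y1):
def make_bbox_legal(bbox: np.ndarray) -> np.ndarray:
    fixed = bbox.copy()
    fixed[..., 0::2] = np.sort(bbox[..., 0::2], axis=-1)  # order the (x0, x1) pair
    fixed[..., 1::2] = np.sort(bbox[..., 1::2], axis=-1)  # order the (y0, y1) pair
    return fixed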
| 75 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = 'timm_backbone'
def __init__( self : List[Any] , _A : str=None , _A : List[str]=3 , _A : Dict=True , _A : Optional[Any]=True , _A : Tuple=None , **_A : Dict , ):
'''simple docstring'''
super().__init__(**_A )
UpperCAmelCase__ : List[Any] = backbone
UpperCAmelCase__ : Union[str, Any] = num_channels
UpperCAmelCase__ : str = features_only
UpperCAmelCase__ : List[str] = use_pretrained_backbone
UpperCAmelCase__ : Union[str, Any] = True
UpperCAmelCase__ : Union[str, Any] = out_indices if out_indices is not None else (-1,)
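# Usage sketch for the config class above (named TimmBackboneConfig upstream; the
# values here are illustrative): `backbone` names a timm architecture and
# `out_indices` picks which feature maps are exposed.
#   config = TimmBackboneConfig(backbone="resnet50", num_channels=3, out_indices=(1, 2, 3, 4))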
| 75 |
'''simple docstring'''
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
UpperCamelCase__ = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: ''')))
print('''Googling.....''')
UpperCamelCase__ = F"""https://www.google.com/search?q={query}&num=100"""
UpperCamelCase__ = requests.get(
url,
headers={'''User-Agent''': str(UserAgent().random)},
)
try:
UpperCamelCase__ = (
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''yuRUbf'''})
.find('''a''')
.get('''href''')
)
except AttributeError:
UpperCamelCase__ = parse_qs(
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''kCrYT'''})
.find('''a''')
.get('''href''')
)['''url'''][0]
webbrowser.open(link)
| 75 | 1 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = ['image_processor', 'tokenizer']
lowerCAmelCase__ = 'AutoImageProcessor'
lowerCAmelCase__ = 'AutoTokenizer'
def __init__( self : List[Any] , _A : Optional[int] , _A : Optional[int] ):
'''simple docstring'''
super().__init__(_A , _A )
UpperCAmelCase__ : str = self.image_processor
def __call__( self : Dict , _A : Any=None , _A : Optional[Any]=None , _A : List[Any]=None , **_A : int ):
'''simple docstring'''
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
UpperCAmelCase__ : Union[str, Any] = self.tokenizer(_A , return_tensors=_A , **_A )
if images is not None:
UpperCAmelCase__ : Dict = self.image_processor(_A , return_tensors=_A , **_A )
if text is not None and images is not None:
UpperCAmelCase__ : str = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_A ) , tensor_type=_A )
def lowercase_ ( self : Optional[int] , *_A : Any , **_A : List[str] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*_A , **_A )
def lowercase_ ( self : Dict , *_A : Any , **_A : int ):
'''simple docstring'''
return self.tokenizer.decode(*_A , **_A )
@property
def lowercase_ ( self : Tuple ):
'''simple docstring'''
return ["input_ids", "attention_mask", "pixel_values"]
| 75 |
'''simple docstring'''
from pathlib import Path
import cv2
import numpy as np
from matplotlib import pyplot as plt
def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    rotation_matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, rotation_matrix, (rows, cols))
if __name__ == "__main__":
# read original image
UpperCamelCase__ = cv2.imread(
str(Path(__file__).resolve().parent.parent / '''image_data''' / '''lena.jpg''')
)
# turn image in gray scale value
UpperCamelCase__ = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# get image shape
UpperCamelCase__ , UpperCamelCase__ = gray_img.shape
# set different points to rotate image
UpperCamelCase__ = np.array([[5_0, 5_0], [2_0_0, 5_0], [5_0, 2_0_0]], np.float32)
UpperCamelCase__ = np.array([[1_0, 1_0_0], [2_0_0, 5_0], [1_0_0, 2_5_0]], np.float32)
UpperCamelCase__ = np.array([[5_0, 5_0], [1_5_0, 5_0], [1_2_0, 2_0_0]], np.float32)
UpperCamelCase__ = np.array([[1_0, 1_0_0], [8_0, 5_0], [1_8_0, 2_5_0]], np.float32)
# add all rotated images in a list
UpperCamelCase__ = [
gray_img,
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
]
# plot different image rotations
UpperCamelCase__ = plt.figure(1)
UpperCamelCase__ = ['''Original''', '''Rotation 1''', '''Rotation 2''', '''Rotation 3''']
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, '''gray''')
plt.title(titles[i])
plt.axis('''off''')
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
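# cv2.getAffineTransform solves for the 2x3 matrix M that maps three source points
# onto three destination points, i.e. [x', y'] = M @ [x, y, 1]. Quick standalone check:
# a pure translation by (1, 1) should give an identity linear part.
def _affine_translation_demo() -> None:
    src = np.array([[0, 0], [1, 0], [0, 1]], np.float32)
    dst = np.array([[1, 1], [2, 1], [1, 2]], np.float32)
    print(cv2.getAffineTransform(src, dst))  # expected: [[1. 0. 1.], [0. 1. 1.]]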
| 75 | 1 |
'''simple docstring'''
from ...utils import OptionalDependencyNotAvailable, is_note_seq_available, is_torch_available, is_transformers_available
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 75 |
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
UpperCamelCase__ = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''feature request''',
'''new model''',
'''wip''',
]
def a__ ( ) -> List[str]:
UpperCAmelCase__ : int = Github(os.environ['''GITHUB_TOKEN'''] )
UpperCAmelCase__ : List[Any] = g.get_repo('''huggingface/transformers''' )
UpperCAmelCase__ : List[str] = repo.get_issues(state='''open''' )
for issue in open_issues:
UpperCAmelCase__ : List[Any] = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
UpperCAmelCase__ : Tuple = comments[0] if len(lowerCAmelCase__ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='''closed''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
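# Timeline implied by the branches above: an issue at least 30 days old and untouched
# for more than 23 days gets the stale comment; once the bot's own comment has sat for
# more than 7 days with no further activity, the issue is closed.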
| 75 | 1 |
'''simple docstring'''
from manim import *
class lowerCamelCase_ ( __a ):
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Any = Rectangle(height=0.5 , width=0.5 )
UpperCAmelCase__ : Optional[int] = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
UpperCAmelCase__ : Optional[int] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : Dict = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : Any = VGroup(*_A ).arrange(_A , buff=0 )
UpperCAmelCase__ : Optional[int] = VGroup(*_A ).arrange(_A , buff=0 )
UpperCAmelCase__ : Optional[Any] = VGroup(_A , _A ).arrange(_A , buff=0 )
UpperCAmelCase__ : List[str] = Text('''CPU''' , font_size=24 )
UpperCAmelCase__ : Optional[int] = Group(_A , _A ).arrange(_A , buff=0.5 , aligned_edge=_A )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_A )
UpperCAmelCase__ : Union[str, Any] = [mem.copy() for i in range(4 )]
UpperCAmelCase__ : List[str] = VGroup(*_A ).arrange(_A , buff=0 )
UpperCAmelCase__ : Dict = Text('''GPU''' , font_size=24 )
UpperCAmelCase__ : str = Group(_A , _A ).arrange(_A , buff=0.5 , aligned_edge=_A )
gpu.move_to([-1, -1, 0] )
self.add(_A )
UpperCAmelCase__ : Dict = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : Optional[Any] = VGroup(*_A ).arrange(_A , buff=0 )
UpperCAmelCase__ : int = Text('''Model''' , font_size=24 )
UpperCAmelCase__ : Union[str, Any] = Group(_A , _A ).arrange(_A , buff=0.5 , aligned_edge=_A )
model.move_to([3, -1.0, 0] )
self.add(_A )
UpperCAmelCase__ : List[str] = []
for i, rect in enumerate(_A ):
rect.set_stroke(_A )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
UpperCAmelCase__ : str = Rectangle(height=0.4_6 / 4 , width=0.4_6 / 3 ).set_stroke(width=0.0 ).set_fill(_A , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=_A )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=_A , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=_A , buff=0.0 )
self.add(_A )
cpu_targs.append(_A )
UpperCAmelCase__ : str = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : int = VGroup(*_A ).arrange(_A , buff=0 )
UpperCAmelCase__ : List[str] = Text('''Loaded Checkpoint''' , font_size=24 )
UpperCAmelCase__ : Optional[int] = Group(_A , _A ).arrange(_A , aligned_edge=_A , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
UpperCAmelCase__ : List[str] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCAmelCase__ : Any = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_A , _A )
UpperCAmelCase__ : List[Any] = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(_A , DOWN * 2.4 , aligned_edge=key_text.get_left() )
UpperCAmelCase__ : Optional[int] = MarkupText(
f"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_A ) , Write(_A ) )
self.play(Write(_A , run_time=1 ) , Create(_A , run_time=1 ) )
UpperCAmelCase__ : List[str] = []
UpperCAmelCase__ : Dict = []
for i, rect in enumerate(_A ):
UpperCAmelCase__ : int = fill.copy().set_fill(_A , opacity=0.7 )
target.move_to(_A )
first_animations.append(GrowFromCenter(_A , run_time=1 ) )
UpperCAmelCase__ : Tuple = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(_A , run_time=1.5 ) )
self.play(*_A )
self.play(*_A )
self.wait()
| 75 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCamelCase_ ( __a ):
def __init__( self : Dict , _A : List[str] , _A : int ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=_A , scheduler=_A )
@torch.no_grad()
def __call__( self : List[Any] , _A : int = 1 , _A : int = 100 , _A : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _A : Optional[float] = None , _A : bool = True , ):
'''simple docstring'''
if audio_length_in_s is None:
UpperCAmelCase__ : List[str] = self.unet.config.sample_size / self.unet.config.sample_rate
UpperCAmelCase__ : Union[str, Any] = audio_length_in_s * self.unet.config.sample_rate
UpperCAmelCase__ : List[Any] = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
f"""{audio_length_in_s} is too small. Make sure it's bigger or equal to"""
f""" {3 * down_scale_factor / self.unet.config.sample_rate}.""" )
UpperCAmelCase__ : List[Any] = int(_A )
if sample_size % down_scale_factor != 0:
UpperCAmelCase__ : int = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"""
f""" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"""
''' process.''' )
UpperCAmelCase__ : Dict = int(_A )
UpperCAmelCase__ : Optional[Any] = next(iter(self.unet.parameters() ) ).dtype
UpperCAmelCase__ : int = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(_A , _A ) and len(_A ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(_A )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
UpperCAmelCase__ : Optional[int] = randn_tensor(_A , generator=_A , device=self.device , dtype=_A )
# set step values
self.scheduler.set_timesteps(_A , device=audio.device )
UpperCAmelCase__ : List[str] = self.scheduler.timesteps.to(_A )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
UpperCAmelCase__ : Optional[int] = self.unet(_A , _A ).sample
            # 2. compute the previous sample: x_t -> x_t-1
UpperCAmelCase__ : List[Any] = self.scheduler.step(_A , _A , _A ).prev_sample
UpperCAmelCase__ : Any = audio.clamp(-1 , 1 ).float().cpu().numpy()
UpperCAmelCase__ : Any = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=_A )
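# Hedged usage sketch: in diffusers, the de-obfuscated equivalent of this pipeline
# is DanceDiffusionPipeline; the checkpoint name below is an assumption.
import torch
from diffusers import DanceDiffusionPipeline

pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
generator = torch.Generator("cpu").manual_seed(0)
audio = pipe(num_inference_steps=100, generator=generator, audio_length_in_s=4.0).audios[0]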
| 75 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase__ = {
'''configuration_lxmert''': ['''LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LxmertConfig'''],
'''tokenization_lxmert''': ['''LxmertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ['''LxmertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'''LxmertEncoder''',
'''LxmertForPreTraining''',
'''LxmertForQuestionAnswering''',
'''LxmertModel''',
'''LxmertPreTrainedModel''',
'''LxmertVisualFeatureEncoder''',
'''LxmertXLayer''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'''TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLxmertForPreTraining''',
'''TFLxmertMainLayer''',
'''TFLxmertModel''',
'''TFLxmertPreTrainedModel''',
'''TFLxmertVisualFeatureEncoder''',
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
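# Hedged sketch of what the _LazyModule pattern above buys the caller: importing
# the package is cheap, and torch/tensorflow are only pulled in when a model
# class is first accessed.
from transformers import LxmertConfig  # resolves lazily, without importing torch

config = LxmertConfig()
print(config.hidden_size)  # 768 by default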
| 75 |
'''simple docstring'''
from math import factorial
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> float:
if successes > trials:
        raise ValueError('''successes must be lower than or equal to trials''' )
if trials < 0 or successes < 0:
raise ValueError('''the function is defined for non-negative integers''' )
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
raise ValueError('''the function is defined for non-negative integers''' )
if not 0 < prob < 1:
        raise ValueError('''prob has to be in the range (0, 1)''' )
UpperCAmelCase__ : Any = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
UpperCAmelCase__ : Any = float(factorial(lowerCAmelCase__ ) )
coefficient /= factorial(lowerCAmelCase__ ) * factorial(trials - successes )
return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
    print('''Probability of 2 successes out of 4 trials''')
print('''with probability of 0.75 is:''', end=''' ''')
print(binomial_distribution(2, 4, 0.75))
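# Worked check of the call above, P(X = 2) for n = 4 trials with p = 0.75:
# C(4, 2) = 6, p**2 = 0.5625, (1 - p)**2 = 0.0625, so 6 * 0.5625 * 0.0625 = 0.2109375.
from math import comb

assert comb(4, 2) * 0.75**2 * 0.25**2 == 0.2109375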
| 75 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int:
# Construct model
if gpta_config_file == "":
UpperCAmelCase__ : Optional[Any] = GPTaConfig()
else:
UpperCAmelCase__ : Tuple = GPTaConfig.from_json_file(lowerCAmelCase__ )
UpperCAmelCase__ : Dict = GPTaModel(lowerCAmelCase__ )
# Load weights from numpy
load_tf_weights_in_gpta(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save pytorch-model
UpperCAmelCase__ : Union[str, Any] = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
UpperCAmelCase__ : Optional[Any] = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
print(F"""Save PyTorch model to {pytorch_weights_dump_path}""" )
torch.save(model.state_dict() , lowerCAmelCase__ )
print(F"""Save configuration file to {pytorch_config_dump_path}""" )
with open(lowerCAmelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--gpt2_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--gpt2_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
'''This specifies the model architecture.'''
),
)
UpperCamelCase__ = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
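# Hedged example invocation of the script above; all paths are illustrative.
#
#   python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#       --gpt2_checkpoint_path /path/to/tf_checkpoint \
#       --pytorch_dump_folder_path /path/to/output_dir \
#       --gpt2_config_file /path/to/config.json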
| 75 |
'''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCamelCase__ = logging.get_logger(__name__)
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = ['pixel_values']
def __init__( self : List[str] , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : bool = True , _A : Dict[str, int] = None , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , _A : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **_A : int , ):
'''simple docstring'''
super().__init__(**_A )
UpperCAmelCase__ : Dict = size if size is not None else {'''shortest_edge''': 224}
UpperCAmelCase__ : int = get_size_dict(_A , default_to_square=_A )
UpperCAmelCase__ : Union[str, Any] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
UpperCAmelCase__ : List[str] = get_size_dict(_A , param_name='''crop_size''' )
UpperCAmelCase__ : str = do_resize
UpperCAmelCase__ : List[Any] = size
UpperCAmelCase__ : int = resample
UpperCAmelCase__ : int = do_center_crop
UpperCAmelCase__ : List[str] = crop_size
UpperCAmelCase__ : Union[str, Any] = do_rescale
UpperCAmelCase__ : Optional[int] = rescale_factor
UpperCAmelCase__ : List[Any] = do_normalize
UpperCAmelCase__ : Dict = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
UpperCAmelCase__ : Dict = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def lowercase_ ( self : str , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = get_size_dict(_A , default_to_square=_A )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
UpperCAmelCase__ : Tuple = int((256 / 224) * size['''shortest_edge'''] )
UpperCAmelCase__ : Tuple = get_resize_output_image_size(_A , size=_A , default_to_square=_A )
UpperCAmelCase__ : Dict = {'''height''': output_size[0], '''width''': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" )
return resize(
_A , size=(size_dict['''height'''], size_dict['''width''']) , resample=_A , data_format=_A , **_A )
def lowercase_ ( self : Optional[Any] , _A : np.ndarray , _A : Dict[str, int] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Optional[int] , ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(_A , size=(size['''height'''], size['''width''']) , data_format=_A , **_A )
def lowercase_ ( self : List[str] , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Dict , ):
'''simple docstring'''
return rescale(_A , scale=_A , data_format=_A , **_A )
def lowercase_ ( self : Dict , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Optional[int] , ):
'''simple docstring'''
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def lowercase_ ( self : Optional[Any] , _A : ImageInput , _A : Optional[bool] = None , _A : Optional[Dict[str, int]] = None , _A : PILImageResampling = None , _A : Optional[bool] = None , _A : Optional[Dict[str, int]] = None , _A : Optional[bool] = None , _A : Optional[float] = None , _A : Optional[bool] = None , _A : Optional[Union[float, Iterable[float]]] = None , _A : Optional[Union[float, Iterable[float]]] = None , _A : Optional[TensorType] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : str , ):
'''simple docstring'''
UpperCAmelCase__ : str = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase__ : Optional[int] = resample if resample is not None else self.resample
UpperCAmelCase__ : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase__ : Tuple = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase__ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase__ : List[str] = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase__ : Tuple = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase__ : List[str] = image_std if image_std is not None else self.image_std
UpperCAmelCase__ : Tuple = size if size is not None else self.size
UpperCAmelCase__ : int = get_size_dict(_A , default_to_square=_A )
UpperCAmelCase__ : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase__ : int = get_size_dict(_A , param_name='''crop_size''' )
UpperCAmelCase__ : Union[str, Any] = make_list_of_images(_A )
if not valid_images(_A ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
UpperCAmelCase__ : int = [to_numpy_array(_A ) for image in images]
if do_resize:
UpperCAmelCase__ : str = [self.resize(_A , _A , _A ) for image in images]
if do_center_crop:
UpperCAmelCase__ : Tuple = [self.center_crop(_A , _A ) for image in images]
if do_rescale:
UpperCAmelCase__ : Optional[int] = [self.rescale(_A , _A ) for image in images]
if do_normalize:
UpperCAmelCase__ : Any = [self.normalize(_A , _A , _A ) for image in images]
UpperCAmelCase__ : Tuple = [to_channel_dimension_format(_A , _A ) for image in images]
UpperCAmelCase__ : Dict = {'''pixel_values''': images}
return BatchFeature(data=_A , tensor_type=_A )
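# Hedged usage sketch: the (256 / 224) shortest-edge rescale above matches
# transformers' LevitImageProcessor, assumed to be the de-obfuscated original here.
import numpy as np
from transformers import LevitImageProcessor

processor = LevitImageProcessor()  # defaults: shortest-edge resize + 224x224 center crop
image = np.zeros((256, 256, 3), dtype=np.uint8)
print(processor(images=image, return_tensors="np")["pixel_values"].shape)  # (1, 3, 224, 224)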
| 75 | 1 |
'''simple docstring'''
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
UpperCamelCase__ = {
'''E''': 12.70,
'''T''': 9.06,
'''A''': 8.17,
'''O''': 7.51,
'''I''': 6.97,
'''N''': 6.75,
'''S''': 6.33,
'''H''': 6.09,
'''R''': 5.99,
'''D''': 4.25,
'''L''': 4.03,
'''C''': 2.78,
'''U''': 2.76,
'''M''': 2.41,
'''W''': 2.36,
'''F''': 2.23,
'''G''': 2.02,
'''Y''': 1.97,
'''P''': 1.93,
'''B''': 1.29,
'''V''': 0.98,
'''K''': 0.77,
'''J''': 0.15,
'''X''': 0.15,
'''Q''': 0.10,
'''Z''': 0.07,
}
UpperCamelCase__ = '''ETAOINSHRDLCUMWFGYPBVKJXQZ'''
UpperCamelCase__ = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''
def a__ ( lowerCAmelCase__ ) -> dict[str, int]:
UpperCAmelCase__ : int = {letter: 0 for letter in string.ascii_uppercase}
for letter in message.upper():
if letter in LETTERS:
letter_count[letter] += 1
return letter_count
def a__ ( lowerCAmelCase__ ) -> str:
return x[0]
def a__ ( lowerCAmelCase__ ) -> str:
UpperCAmelCase__ : Optional[int] = get_letter_count(lowerCAmelCase__ )
UpperCAmelCase__ : dict[int, list[str]] = {
freq: [] for letter, freq in letter_to_freq.items()
}
for letter in LETTERS:
freq_to_letter[letter_to_freq[letter]].append(lowerCAmelCase__ )
UpperCAmelCase__ : dict[int, str] = {}
for freq in freq_to_letter:
freq_to_letter[freq].sort(key=ETAOIN.find , reverse=lowerCAmelCase__ )
UpperCAmelCase__ : Tuple = ''''''.join(freq_to_letter[freq] )
UpperCAmelCase__ : List[Any] = list(freq_to_letter_str.items() )
freq_pairs.sort(key=lowerCAmelCase__ , reverse=lowerCAmelCase__ )
UpperCAmelCase__ : list[str] = [freq_pair[1] for freq_pair in freq_pairs]
return "".join(lowerCAmelCase__ )
def a__ ( lowerCAmelCase__ ) -> int:
UpperCAmelCase__ : Union[str, Any] = get_frequency_order(lowerCAmelCase__ )
UpperCAmelCase__ : Tuple = 0
for common_letter in ETAOIN[:6]:
if common_letter in freq_order[:6]:
match_score += 1
for uncommon_letter in ETAOIN[-6:]:
if uncommon_letter in freq_order[-6:]:
match_score += 1
return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
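# Hedged usage sketch, written against the original names that the bodies above
# still reference internally (get_letter_count / get_frequency_order /
# english_freq_match_score in the de-obfuscated source):
message = "Defend the east wall of the castle."
print(get_letter_count(message)["E"])     # per-letter counts, uppercase keys
print(get_frequency_order(message))       # 26 letters, most to least frequent
print(english_freq_match_score(message))  # 0-12 overlap with the ETAOIN profile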
| 75 |
'''simple docstring'''
import math
def a__ ( ) -> None:
UpperCAmelCase__ : List[str] = input('''Enter message: ''' )
UpperCAmelCase__ : Any = int(input(F"""Enter key [2-{len(lowerCAmelCase__ ) - 1}]: """ ) )
UpperCAmelCase__ : List[str] = input('''Encryption/Decryption [e/d]: ''' )
if mode.lower().startswith('''e''' ):
UpperCAmelCase__ : Dict = encrypt_message(lowerCAmelCase__ , lowerCAmelCase__ )
elif mode.lower().startswith('''d''' ):
UpperCAmelCase__ : Optional[int] = decrypt_message(lowerCAmelCase__ , lowerCAmelCase__ )
# Append pipe symbol (vertical bar) to identify spaces at the end.
print(F"""Output:\n{text + "|"}""" )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
UpperCAmelCase__ : Optional[int] = [''''''] * key
for col in range(lowerCAmelCase__ ):
UpperCAmelCase__ : Tuple = col
while pointer < len(lowerCAmelCase__ ):
cipher_text[col] += message[pointer]
pointer += key
return "".join(lowerCAmelCase__ )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
UpperCAmelCase__ : int = math.ceil(len(lowerCAmelCase__ ) / key )
UpperCAmelCase__ : Any = key
UpperCAmelCase__ : Optional[int] = (num_cols * num_rows) - len(lowerCAmelCase__ )
UpperCAmelCase__ : List[Any] = [''''''] * num_cols
UpperCAmelCase__ : List[str] = 0
UpperCAmelCase__ : List[Any] = 0
for symbol in message:
plain_text[col] += symbol
col += 1
if (
(col == num_cols)
or (col == num_cols - 1)
and (row >= num_rows - num_shaded_boxes)
):
UpperCAmelCase__ : Optional[int] = 0
row += 1
return "".join(lowerCAmelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
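# Hedged round-trip check, written against the original names the driver above
# references (encrypt_message / decrypt_message, with the key passed first):
message, key = "Common sense is not so common.", 8
ciphertext = encrypt_message(key, message)
assert decrypt_message(key, ciphertext) == message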
| 75 | 1 |
'''simple docstring'''
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ) -> Optional[int]:
UpperCAmelCase__ : Optional[int] = {
'''7z''': (seven_zip_file, SevenZipExtractor),
'''bz2''': (bza_file, BzipaExtractor),
'''gzip''': (gz_file, GzipExtractor),
'''lz4''': (lza_file, LzaExtractor),
'''tar''': (tar_file, TarExtractor),
'''xz''': (xz_file, XzExtractor),
'''zip''': (zip_file, ZipExtractor),
'''zstd''': (zstd_file, ZstdExtractor),
}
UpperCAmelCase__ , UpperCAmelCase__ : int = input_paths_and_base_extractors[compression_format]
if input_path is None:
UpperCAmelCase__ : Tuple = F"""for '{compression_format}' compression_format, """
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(lowerCAmelCase__ )
assert base_extractor.is_extractable(lowerCAmelCase__ )
UpperCAmelCase__ : List[str] = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''')
base_extractor.extract(lowerCAmelCase__ , lowerCAmelCase__ )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
UpperCAmelCase__ : Dict = file_path.read_text(encoding='''utf-8''' )
else:
UpperCAmelCase__ : Union[str, Any] = output_path.read_text(encoding='''utf-8''' )
UpperCAmelCase__ : Dict = text_file.read_text(encoding='''utf-8''' )
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ) -> Optional[Any]:
UpperCAmelCase__ : Union[str, Any] = {
'''7z''': seven_zip_file,
'''bz2''': bza_file,
'''gzip''': gz_file,
'''lz4''': lza_file,
'''tar''': tar_file,
'''xz''': xz_file,
'''zip''': zip_file,
'''zstd''': zstd_file,
}
UpperCAmelCase__ : Dict = input_paths[compression_format]
if input_path is None:
UpperCAmelCase__ : int = F"""for '{compression_format}' compression_format, """
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(lowerCAmelCase__ )
UpperCAmelCase__ : Any = Extractor.infer_extractor_format(lowerCAmelCase__ )
assert extractor_format is not None
UpperCAmelCase__ : Any = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''')
Extractor.extract(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
UpperCAmelCase__ : Optional[int] = file_path.read_text(encoding='''utf-8''' )
else:
UpperCAmelCase__ : Optional[int] = output_path.read_text(encoding='''utf-8''' )
UpperCAmelCase__ : Optional[int] = text_file.read_text(encoding='''utf-8''' )
assert extracted_file_content == expected_file_content
@pytest.fixture
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
import tarfile
UpperCAmelCase__ : int = tmp_path / '''data_dot_dot'''
directory.mkdir()
UpperCAmelCase__ : Tuple = directory / '''tar_file_with_dot_dot.tar'''
with tarfile.TarFile(lowerCAmelCase__ , '''w''' ) as f:
f.add(lowerCAmelCase__ , arcname=os.path.join('''..''' , text_file.name ) )
return path
@pytest.fixture
def a__ ( lowerCAmelCase__ ) -> Any:
import tarfile
UpperCAmelCase__ : str = tmp_path / '''data_sym_link'''
directory.mkdir()
UpperCAmelCase__ : Optional[Any] = directory / '''tar_file_with_sym_link.tar'''
os.symlink('''..''' , directory / '''subdir''' , target_is_directory=lowerCAmelCase__ )
with tarfile.TarFile(lowerCAmelCase__ , '''w''' ) as f:
f.add(str(directory / '''subdir''' ) , arcname='''subdir''' ) # str required by os.readlink on Windows and Python < 3.8
return path
@pytest.mark.parametrize(
'''insecure_tar_file, error_log''' , [('''tar_file_with_dot_dot''', '''illegal path'''), ('''tar_file_with_sym_link''', '''Symlink''')] , )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int:
UpperCAmelCase__ : Dict = {
'''tar_file_with_dot_dot''': tar_file_with_dot_dot,
'''tar_file_with_sym_link''': tar_file_with_sym_link,
}
UpperCAmelCase__ : Any = insecure_tar_files[insecure_tar_file]
UpperCAmelCase__ : Dict = tmp_path / '''extracted'''
TarExtractor.extract(lowerCAmelCase__ , lowerCAmelCase__ )
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
def a__ ( lowerCAmelCase__ ) -> Optional[Any]:
    # We should have fewer false positives than zipfile.is_zipfile
# We do that by checking only the magic number
UpperCAmelCase__ : Dict = tmpdir / '''not_a_zip_file'''
# From: https://github.com/python/cpython/pull/5053
UpperCAmelCase__ : List[str] = (
b'''\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00'''
b'''\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6\'\x00\x00\x00\x15I'''
b'''DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07'''
b'''\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82'''
)
with not_a_zip_file.open('''wb''' ) as f:
f.write(lowerCAmelCase__ )
assert zipfile.is_zipfile(str(lowerCAmelCase__ ) ) # is a false positive for `zipfile`
assert not ZipExtractor.is_extractable(lowerCAmelCase__ ) # but we're right
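# Hedged sketch of the internal API the tests above exercise; paths are illustrative.
from datasets.utils.extract import Extractor

archive, out_dir = "data/archive.zip", "data/extracted"
fmt = Extractor.infer_extractor_format(archive)  # e.g. "zip", or None if unrecognized
if fmt is not None:
    Extractor.extract(archive, out_dir, fmt)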
| 75 |
'''simple docstring'''
class lowerCamelCase_ :
def __init__( self : str , _A : Union[str, Any] , _A : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = name
UpperCAmelCase__ : Union[str, Any] = val
def __str__( self : Tuple ):
'''simple docstring'''
return f"""{self.__class__.__name__}({self.name}, {self.val})"""
def __lt__( self : Union[str, Any] , _A : Dict ):
'''simple docstring'''
return self.val < other.val
class lowerCamelCase_ :
def __init__( self : int , _A : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = {}
UpperCAmelCase__ : int = {}
UpperCAmelCase__ : Any = self.build_heap(_A )
def __getitem__( self : Any , _A : Any ):
'''simple docstring'''
return self.get_value(_A )
def lowercase_ ( self : Any , _A : List[Any] ):
'''simple docstring'''
return (idx - 1) // 2
def lowercase_ ( self : Union[str, Any] , _A : Optional[int] ):
'''simple docstring'''
return idx * 2 + 1
def lowercase_ ( self : Tuple , _A : List[Any] ):
'''simple docstring'''
return idx * 2 + 2
def lowercase_ ( self : List[str] , _A : Tuple ):
'''simple docstring'''
return self.heap_dict[key]
def lowercase_ ( self : str , _A : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Any = len(_A ) - 1
UpperCAmelCase__ : Tuple = self.get_parent_idx(_A )
for idx, i in enumerate(_A ):
UpperCAmelCase__ : Dict = idx
UpperCAmelCase__ : Optional[Any] = i.val
for i in range(_A , -1 , -1 ):
self.sift_down(_A , _A )
return array
def lowercase_ ( self : Optional[Any] , _A : str , _A : List[Any] ):
'''simple docstring'''
while True:
UpperCAmelCase__ : Any = self.get_left_child_idx(_A ) # noqa: E741
UpperCAmelCase__ : Optional[Any] = self.get_right_child_idx(_A )
UpperCAmelCase__ : Tuple = idx
if l < len(_A ) and array[l] < array[idx]:
UpperCAmelCase__ : int = l
if r < len(_A ) and array[r] < array[smallest]:
UpperCAmelCase__ : Dict = r
if smallest != idx:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = array[smallest], array[idx]
                UpperCAmelCase__ , UpperCAmelCase__ : List[str] = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
UpperCAmelCase__ : str = smallest
else:
break
def lowercase_ ( self : List[str] , _A : int ):
'''simple docstring'''
UpperCAmelCase__ : str = self.get_parent_idx(_A )
while p >= 0 and self.heap[p] > self.heap[idx]:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.heap[idx], self.heap[p]
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = (
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
UpperCAmelCase__ : Union[str, Any] = p
UpperCAmelCase__ : List[Any] = self.get_parent_idx(_A )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
return self.heap[0]
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Any = self.heap[-1], self.heap[0]
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = (
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
UpperCAmelCase__ : int = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0 , self.heap )
return x
def lowercase_ ( self : int , _A : Union[str, Any] ):
'''simple docstring'''
self.heap.append(_A )
UpperCAmelCase__ : Union[str, Any] = len(self.heap ) - 1
UpperCAmelCase__ : Optional[Any] = node.val
self.sift_up(len(self.heap ) - 1 )
def lowercase_ ( self : str ):
'''simple docstring'''
return len(self.heap ) == 0
def lowercase_ ( self : int , _A : Optional[Any] , _A : str ):
'''simple docstring'''
assert (
self.heap[self.idx_of_element[node]].val > new_value
), "newValue must be less that current value"
UpperCAmelCase__ : Optional[Any] = new_value
UpperCAmelCase__ : List[str] = new_value
self.sift_up(self.idx_of_element[node] )
UpperCamelCase__ = Node('''R''', -1)
UpperCamelCase__ = Node('''B''', 6)
UpperCamelCase__ = Node('''A''', 3)
UpperCamelCase__ = Node('''X''', 1)
UpperCamelCase__ = Node('''E''', 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
UpperCamelCase__ = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('''Min Heap - before decrease key''')
for i in my_min_heap.heap:
print(i)
print('''Min Heap - After decrease key of node [B -> -17]''')
my_min_heap.decrease_key(b, -1_7)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
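# A couple more hedged operations, written against the original method names the
# driver above already assumes (get_top / remove / is_empty):
print(my_min_heap.get_top())   # peek at the smallest node (B after the -17 decrease)
print(my_min_heap.remove())    # pop it; sift_down restores the heap property
print(my_min_heap.is_empty())  # False: four nodes remain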
| 75 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {'''vocab_file''': '''sentencepiece.model'''}
UpperCamelCase__ = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
}
UpperCamelCase__ = {
'''google/rembert''': 2_5_6,
}
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Optional[int] , _A : str , _A : Union[str, Any]=False , _A : Any=True , _A : Optional[Any]=True , _A : Tuple="[CLS]" , _A : Dict="[SEP]" , _A : Tuple="[UNK]" , _A : Dict="[SEP]" , _A : List[str]="[PAD]" , _A : Union[str, Any]="[CLS]" , _A : int="[MASK]" , **_A : Union[str, Any] , ):
'''simple docstring'''
super().__init__(
do_lower_case=_A , remove_space=_A , keep_accents=_A , bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , **_A , )
UpperCAmelCase__ : int = do_lower_case
UpperCAmelCase__ : Dict = remove_space
UpperCAmelCase__ : List[str] = keep_accents
UpperCAmelCase__ : Dict = vocab_file
UpperCAmelCase__ : Tuple = spm.SentencePieceProcessor()
self.sp_model.Load(_A )
@property
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
return len(self.sp_model )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.__dict__.copy()
UpperCAmelCase__ : Any = None
return state
def __setstate__( self : List[str] , _A : str ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = d
UpperCAmelCase__ : str = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
def lowercase_ ( self : Any , _A : int , _A : Any=False ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.sp_model.EncodeAsPieces(_A )
return pieces
def lowercase_ ( self : List[str] , _A : Tuple ):
'''simple docstring'''
return self.sp_model.PieceToId(_A )
def lowercase_ ( self : int , _A : int ):
'''simple docstring'''
return self.sp_model.IdToPiece(_A )
def lowercase_ ( self : Optional[Any] , _A : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.sp_model.decode_pieces(_A )
return out_string
def lowercase_ ( self : List[str] , _A : List[int] , _A : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase__ : int = [self.sep_token_id]
UpperCAmelCase__ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowercase_ ( self : Tuple , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_A )) + [1] + ([0] * len(_A )) + [1]
return [1] + ([0] * len(_A )) + [1]
def lowercase_ ( self : Optional[Any] , _A : List[int] , _A : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = [self.sep_token_id]
UpperCAmelCase__ : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowercase_ ( self : Any , _A : str , _A : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(_A ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(_A ) )
return
UpperCAmelCase__ : Union[str, Any] = os.path.join(
_A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ):
copyfile(self.vocab_file , _A )
return (out_vocab_file,)
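# Hedged usage sketch, loading the hub checkpoint referenced in the vocab map above:
from transformers import RemBertTokenizer

tok = RemBertTokenizer.from_pretrained("google/rembert")
encoded = tok("Hello world")
print(encoded["input_ids"])  # [CLS] ... [SEP] ids per the special-token layout above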
| 75 |
'''simple docstring'''
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent that you run this script from the root of the repo with:
# python utils/check_config_docstrings.py
UpperCamelCase__ = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
UpperCamelCase__ = direct_transformers_import(PATH_TO_TRANSFORMERS)
UpperCamelCase__ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
UpperCamelCase__ = re.compile(R'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''')
UpperCamelCase__ = {
'''DecisionTransformerConfig''',
'''EncoderDecoderConfig''',
'''MusicgenConfig''',
'''RagConfig''',
'''SpeechEncoderDecoderConfig''',
'''TimmBackboneConfig''',
'''VisionEncoderDecoderConfig''',
'''VisionTextDualEncoderConfig''',
'''LlamaConfig''',
}
def a__ ( lowerCAmelCase__ ) -> List[str]:
UpperCAmelCase__ : str = None
# source code of `config_class`
UpperCAmelCase__ : str = inspect.getsource(lowerCAmelCase__ )
UpperCAmelCase__ : List[Any] = _re_checkpoint.findall(lowerCAmelCase__ )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith('''/''' ):
UpperCAmelCase__ : List[str] = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
UpperCAmelCase__ : Union[str, Any] = F"""https://huggingface.co/{ckpt_name}"""
if ckpt_link == ckpt_link_from_name:
UpperCAmelCase__ : Any = ckpt_name
break
return checkpoint
def a__ ( ) -> Dict:
UpperCAmelCase__ : Optional[Any] = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
UpperCAmelCase__ : Any = get_checkpoint_from_config_class(lowerCAmelCase__ )
UpperCAmelCase__ : Optional[int] = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > 0:
UpperCAmelCase__ : List[str] = '''\n'''.join(sorted(lowerCAmelCase__ ) )
raise ValueError(F"""The following configurations don't contain any valid checkpoint:\n{message}""" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
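# Hedged example of what the checkpoint regex above extracts from a docstring:
import re

pattern = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
doc = "See [bert-base-uncased](https://huggingface.co/bert-base-uncased)."
print(pattern.findall(doc))  # [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]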
| 75 | 1 |