code stringlengths 86 54.5k | code_codestyle int64 0 371 | style_context stringlengths 87 49.2k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True )
class ImageClassification( TaskTemplate ):
    '''simple docstring'''

    task : str = field(default="image-classification" ,metadata={"include_in_asdict_even_if_is_default": True} )
    input_schema : ClassVar[Features] = Features({"image": Image()} )
    label_schema : ClassVar[Features] = Features({"labels": ClassLabel} )
    image_column : str = "image"
    label_column : str = "labels"

    def align_with_features( self ,features : Features ) -> TaskTemplate:
        """simple docstring"""
        if self.label_column not in features:
            raise ValueError(f"""Column {self.label_column} is not present in features.""" )
        if not isinstance(features[self.label_column] ,ClassLabel ):
            raise ValueError(f"""Column {self.label_column} is not a ClassLabel.""" )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping( self ) -> Dict[str, str]:
        """simple docstring"""
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
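# Illustrative usage sketch (added; not part of the original file). The feature names
# and label values below are made up for the example; only the class and imports above
# are assumed.
if __name__ == "__main__":
    example_features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])} )
    task = ImageClassification()
    aligned = task.align_with_features(example_features )
    print(aligned.label_schema )  # the labels column now carries the concrete ClassLabel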
| 16 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
    from transformers import (
        AutoConfig,
        BertConfig,
        GPT2Config,
        T5Config,
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
    from transformers import (
        AutoModel,
        AutoModelForCausalLM,
        AutoModelForMaskedLM,
        AutoModelForPreTraining,
        AutoModelForQuestionAnswering,
        AutoModelForSeq2SeqLM,
        AutoModelForSequenceClassification,
        AutoModelWithLMHead,
        BertForMaskedLM,
        BertForPreTraining,
        BertForQuestionAnswering,
        BertForSequenceClassification,
        BertModel,
        GPT2LMHeadModel,
        RobertaForMaskedLM,
        T5ForConditionalGeneration,
    )
@is_pt_tf_cross_test
class TFPTAutoModelTest( unittest.TestCase ):
    '''simple docstring'''

    @slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config ,BertConfig )

            model = TFAutoModel.from_pretrained(model_name ,from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model ,TFBertModel )

            model = AutoModel.from_pretrained(model_name ,from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model ,BertModel )

    @slow
    def test_model_for_pretraining_from_pretrained( self ):
        """simple docstring"""
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config ,BertConfig )

            model = TFAutoModelForPreTraining.from_pretrained(model_name ,from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model ,TFBertForPreTraining )

            model = AutoModelForPreTraining.from_pretrained(model_name ,from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model ,BertForPreTraining )

    @slow
    def test_model_for_causal_lm( self ):
        """simple docstring"""
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config ,GPT2Config )

            model = TFAutoModelForCausalLM.from_pretrained(model_name ,from_pt=True )
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(
                model_name ,output_loading_info=True ,from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model ,TFGPT2LMHeadModel )

            model = AutoModelForCausalLM.from_pretrained(model_name ,from_tf=True )
            model, loading_info = AutoModelForCausalLM.from_pretrained(
                model_name ,output_loading_info=True ,from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model ,GPT2LMHeadModel )

    @slow
    def test_lmhead_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config ,BertConfig )

            model = TFAutoModelWithLMHead.from_pretrained(model_name ,from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model ,TFBertForMaskedLM )

            model = AutoModelWithLMHead.from_pretrained(model_name ,from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model ,BertForMaskedLM )

    @slow
    def test_model_for_masked_lm( self ):
        """simple docstring"""
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config ,BertConfig )

            model = TFAutoModelForMaskedLM.from_pretrained(model_name ,from_pt=True )
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(
                model_name ,output_loading_info=True ,from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model ,TFBertForMaskedLM )

            model = AutoModelForMaskedLM.from_pretrained(model_name ,from_tf=True )
            model, loading_info = AutoModelForMaskedLM.from_pretrained(
                model_name ,output_loading_info=True ,from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model ,BertForMaskedLM )

    @slow
    def test_model_for_encoder_decoder_lm( self ):
        """simple docstring"""
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config ,T5Config )

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name ,from_pt=True )
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(
                model_name ,output_loading_info=True ,from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model ,TFT5ForConditionalGeneration )

            model = AutoModelForSeq2SeqLM.from_pretrained(model_name ,from_tf=True )
            model, loading_info = AutoModelForSeq2SeqLM.from_pretrained(
                model_name ,output_loading_info=True ,from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model ,T5ForConditionalGeneration )

    @slow
    def test_sequence_classification_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config ,BertConfig )

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name ,from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model ,TFBertForSequenceClassification )

            model = AutoModelForSequenceClassification.from_pretrained(model_name ,from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model ,BertForSequenceClassification )

    @slow
    def test_question_answering_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config ,BertConfig )

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name ,from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model ,TFBertForQuestionAnswering )

            model = AutoModelForQuestionAnswering.from_pretrained(model_name ,from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model ,BertForQuestionAnswering )

    def test_from_pretrained_identifier( self ):
        """simple docstring"""
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER ,from_pt=True )
        self.assertIsInstance(model ,TFBertForMaskedLM )
        self.assertEqual(model.num_parameters() ,14_410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) ,14_410 )

        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER ,from_tf=True )
        self.assertIsInstance(model ,BertForMaskedLM )
        self.assertEqual(model.num_parameters() ,14_410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) ,14_410 )

    def test_from_identifier_from_model_type( self ):
        """simple docstring"""
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER ,from_pt=True )
        self.assertIsInstance(model ,TFRobertaForMaskedLM )
        self.assertEqual(model.num_parameters() ,14_410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) ,14_410 )

        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER ,from_tf=True )
        self.assertIsInstance(model ,RobertaForMaskedLM )
        self.assertEqual(model.num_parameters() ,14_410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) ,14_410 )
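# Illustrative sketch (added; not part of the test file): the PT<->TF round trip the
# tests above exercise, written as plain code. Requires both torch and TF installed;
# the checkpoint name is the same one the tests use.
def _cross_framework_demo(model_name="bert-base-uncased"):
    tf_model = TFAutoModel.from_pretrained(model_name ,from_pt=True )   # PyTorch weights into a TF model
    pt_model = AutoModel.from_pretrained(model_name ,from_tf=True )     # TF weights into a PyTorch model
    return tf_model, pt_model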
| 16 | 1 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor( ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = """CLIPImageProcessor"""
    tokenizer_class = ("""CLIPTokenizer""", """CLIPTokenizerFast""")

    def __init__( self , image_processor=None , tokenizer=None , **kwargs ) -> None:
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor' )

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )

        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ) -> BatchEncoding:
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.' )

        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )

        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )

        if text is not None and images is not None:
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    @property
    def feature_extractor_class( self ):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , FutureWarning , )
        return self.image_processor_class

    @property
    def feature_extractor( self ):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , FutureWarning , )
        return self.image_processor
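# Illustrative usage sketch (added; not part of the module). The checkpoint id is an
# assumption for the example, and the call needs network access plus torch, so it is
# guarded. Text-only input exercises the tokenizer branch of __call__ above.
if __name__ == "__main__":
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32" )
    batch = processor(text=["a photo of a cat", "a photo of a dog"] , return_tensors="pt" , padding=True )
    print(list(batch.keys() ) )  # input_ids / attention_mask from the tokenizer branch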
| 104 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
a_ : Optional[Any] = logging.get_logger(__name__)
class CLIPFeatureExtractor( CLIPImageProcessor ):
    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            'The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use CLIPImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 104 | 1 |
"""simple docstring"""
def sum_digits( num: int )-> int:
    '''simple docstring'''
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution( max_n: int = 100 )-> int:
    '''simple docstring'''
    pre_numerator = 1
    cur_numerator = 2

    for i in range(2 , max_n + 1 ):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp

    return sum_digits(cur_numerator )
if __name__ == "__main__":
print(f'''{solution() = }''')
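    # Worked check (added): the 10th convergent of e is 1457/536, so solution(10)
    # should give digit sum 1 + 4 + 5 + 7 = 17 -- the example stated in Project Euler 65.
    assert solution(10 ) == 17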
| 40 |
"""simple docstring"""
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("""T""")
class GraphAdjacencyList( Generic[T] ):
    """simple docstring"""

    def __init__( self , directed : bool = True ):
        """simple docstring"""
        self.adj_list : dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge( self , source_vertex : T , destination_vertex : T ):
        """simple docstring"""
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex].append(source_vertex )
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as its first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex )
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as its first adjacent vertex; also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as its first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as its first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []

        return self
    def __repr__( self ):
        """simple docstring"""
        return pformat(self.adj_list )
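# Illustrative usage sketch (added; not part of the original module): building a small
# directed graph with the class above. `add_edge` returns self, so calls can chain.
if __name__ == "__main__":
    graph = GraphAdjacencyList[int]()
    graph.add_edge(0 , 1 ).add_edge(1 , 2 ).add_edge(2 , 0 )
    print(graph )  # {0: [1], 1: [2], 2: [0]}
 | 126 | 0 |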
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PREFIX = 'https://openaipublic.azureedge.net/jukebox/models/'
MODEL_MAPPING = {
'jukebox-1b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'1b_lyrics/prior_level_2.pth.tar',
],
'jukebox-5b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'5b_lyrics/prior_level_2.pth.tar',
],
}
def replace_key( key ) -> str:
    if key.endswith('''.model.1.bias''' ) and len(key.split('''.''' ) ) > 10:
        key = key.replace('''.model.1.bias''' , '''.conv1d_1.bias''' )
    elif key.endswith('''.model.1.weight''' ) and len(key.split('''.''' ) ) > 10:
        key = key.replace('''.model.1.weight''' , '''.conv1d_1.weight''' )
    elif key.endswith('''.model.3.bias''' ) and len(key.split('''.''' ) ) > 10:
        key = key.replace('''.model.3.bias''' , '''.conv1d_2.bias''' )
    elif key.endswith('''.model.3.weight''' ) and len(key.split('''.''' ) ) > 10:
        key = key.replace('''.model.3.weight''' , '''.conv1d_2.weight''' )

    if "conditioner_blocks.0." in key:
        key = key.replace('''conditioner_blocks.0''' , '''conditioner_blocks''' )

    if "prime_prior" in key:
        key = key.replace('''prime_prior''' , '''encoder''' )

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace('''.emb.''' , '''.''' )

    if key.endswith('''k''' ):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace('''.k''' , '''.codebook''' )
    if "y_emb." in key:
        return key.replace('''y_emb.''' , '''metadata_embedding.''' )

    if "x_emb.emb." in key:
        key = key.replace('''0.x_emb.emb''' , '''embed_tokens''' )
if "prime_state_ln" in key:
return key.replace('''prime_state_ln''' , '''encoder.final_layer_norm''' )
if ".ln" in key:
return key.replace('''.ln''' , '''.layer_norm''' )
if "_ln" in key:
return key.replace('''_ln''' , '''_layer_norm''' )
if "prime_state_proj" in key:
return key.replace('''prime_state_proj''' , '''encoder.proj_in''' )
if "prime_x_out" in key:
return key.replace('''prime_x_out''' , '''encoder.lm_head''' )
if "prior.x_out" in key:
return key.replace('''x_out''' , '''fc_proj_out''' )
if "x_emb" in key:
return key.replace('''x_emb''' , '''embed_tokens''' )
return key
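# Worked example (added; not part of the script): "prime_prior.transformer.ln" first
# hits the prime_prior rule, becoming "encoder.transformer.ln", then the ".ln" rule
# maps it to its final converted name.
assert replace_key('''prime_prior.transformer.ln''' ) == '''encoder.transformer.layer_norm'''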
def fix_jukebox_keys( state_dict , model_state_dict , key_prefix , mapping ):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(R'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
    re_encoder_block_resnet = re.compile(
        R'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
    re_encoder_block_proj_out = re.compile(R'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )

    re_decoder_block_conv_out = re.compile(R'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
    re_decoder_block_resnet = re.compile(
        R'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
    re_decoder_block_proj_in = re.compile(R'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )

    re_prior_cond_conv_out = re.compile(R'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)''' )
    re_prior_cond_resnet = re.compile(
        R'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
    re_prior_cond_proj_in = re.compile(R'''conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)''' )
    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key ):
            regex_match = re_encoder_block_conv_in.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] )
            re_new_key = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
            key = re_encoder_block_conv_in.sub(re_new_key , original_key )

        elif re_encoder_block_resnet.fullmatch(original_key ):
            regex_match = re_encoder_block_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] )
            conv_index = {'''1''': 1, '''3''': 2}[groups[-2]]
            prefix = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
            resnet_block = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key , original_key )

        elif re_encoder_block_proj_out.fullmatch(original_key ):
            regex_match = re_encoder_block_proj_out.match(original_key )
            groups = regex_match.groups()
            re_new_key = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
            key = re_encoder_block_proj_out.sub(re_new_key , original_key )

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key ):
            regex_match = re_decoder_block_conv_out.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] ) - 2
            re_new_key = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
            key = re_decoder_block_conv_out.sub(re_new_key , original_key )

        elif re_decoder_block_resnet.fullmatch(original_key ):
            regex_match = re_decoder_block_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] ) - 2
            conv_index = {'''1''': 1, '''3''': 2}[groups[-2]]
            prefix = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
            resnet_block = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key , original_key )

        elif re_decoder_block_proj_in.fullmatch(original_key ):
            regex_match = re_decoder_block_proj_in.match(original_key )
            groups = regex_match.groups()
            re_new_key = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
            key = re_decoder_block_proj_in.sub(re_new_key , original_key )

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key ):
            regex_match = re_prior_cond_conv_out.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[1] ) * 2 + int(groups[2] ) - 2
            re_new_key = f"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
            key = re_prior_cond_conv_out.sub(re_new_key , original_key )

        elif re_prior_cond_resnet.fullmatch(original_key ):
            regex_match = re_prior_cond_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[1] ) * 2 + int(groups[2] ) - 2
            conv_index = {'''1''': 1, '''3''': 2}[groups[-2]]
            prefix = f"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
            resnet_block = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key , original_key )

        elif re_prior_cond_proj_in.fullmatch(original_key ):
            regex_match = re_prior_cond_proj_in.match(original_key )
            groups = regex_match.groups()
            re_new_key = f"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
            key = re_prior_cond_proj_in.sub(re_new_key , original_key )

        # keep original key
        else:
            key = original_key

        key = replace_key(key )

        if f"""{key_prefix}.{key}""" not in model_state_dict or key is None:
            print(f"""failed converting {original_key} to {key}, does not match""" )

        # handle missmatched shape
        elif value.shape != model_state_dict[f"""{key_prefix}.{key}"""].shape:
            val = model_state_dict[f"""{key_prefix}.{key}"""]
            print(f"""{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match""" )
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint( model_name=None , pytorch_dump_folder_path=None ):
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" ):
            r = requests.get(f"""{PREFIX}{file}""" , allow_redirects=True )
            os.makedirs(f"""{pytorch_dump_folder_path}/""" , exist_ok=True )
            open(f"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" , '''wb''' ).write(r.content )

    model_to_convert = MODEL_MAPPING[model_name.split('''/''' )[-1]]

    config = JukeboxConfig.from_pretrained(model_name )
    model = JukeboxModel(config )

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert ):
        old_dic = torch.load(f"""{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}""" )['''model''']

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith('''.b''' ):
                new_dic[k.replace('''b''' , '''bias''' )] = old_dic[k]
            elif k.endswith('''.w''' ):
                new_dic[k.replace('''w''' , '''weight''' )] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace('''.blocks.''' , '''.model.''' )] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = '''vqvae''' if i == 0 else f"""priors.{3 - i}"""
        new_dic = fix_jukebox_keys(new_dic , model.state_dict() , key_prefix , mapping )
        weight_dict.append(new_dic )

    vqvae_state_dict = weight_dict.pop(0 )
    model.vqvae.load_state_dict(vqvae_state_dict )
    for i in range(len(weight_dict ) ):
        model.priors[i].load_state_dict(weight_dict[2 - i] )

    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    with open(f"""{pytorch_dump_folder_path}/mapping.json""" , '''w''' ) as txtfile:
        json.dump(mapping , txtfile )

    print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )

    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='jukebox-5b-lyrics',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='jukebox-5b-lyrics-converted',
type=str,
help='Path to the output PyTorch model directory.',
)
    args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 364 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature( word: str ) -> str:
    return "".join(sorted(word ) )


def anagram( my_word: str ) -> list[str]:
    return word_by_signature[signature(my_word )]


data = Path(__file__).parent.joinpath('''words.txt''').read_text(encoding='''utf-8''')
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('anagrams.txt', 'w') as file:
file.write('all_anagrams = \n ')
file.write(pprint.pformat(all_anagrams))
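    # Worked check (added): words are anagrams exactly when their sorted-letter
    # signatures match -- "post" and "stop" both map to "opst".
    assert signature('''post''' ) == signature('''stop''' ) == '''opst'''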
| 238 | 0 |
UNIVERSAL_GAS_CONSTANT = 8.31_4462  # Unit - J mol-1 K-1


def pressure_of_gas_system( moles: float , kelvin: float , volume: float ) -> float:
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system( moles: float , kelvin: float , pressure: float ) -> float:
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
    testmod()
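    # Worked check (added): 1 mol at 300 K in a 1 m^3 vessel gives
    # P = nRT/V = 8.314462 * 300 ≈ 2494.34 Pa.
    assert abs(pressure_of_gas_system(1 , 300 , 1 ) - 2494.3386 ) < 1e-3
 | 76 |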
'''simple docstring'''
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase ):
    """simple docstring"""

    def get_file_format( self , seed , shape ):
        return f'gaussian_noise_s={seed}_shape={"_".join([str(s ) for s in shape] )}.npy'

    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def get_latents( self , seed=0 , shape=(4, 4, 64, 64) , fp16=False ):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed , shape ) ) , dtype=dtype )
        return image

    def get_unet_model( self , fp16=False , model_id="CompVis/stable-diffusion-v1-4" ):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = 'bf16' if fp16 else None

        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id , subfolder='unet' , dtype=dtype , revision=revision )
        return model, params

    def get_encoder_hidden_states( self , seed=0 , shape=(4, 77, 7_68) , fp16=False ):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed , shape ) ) , dtype=dtype )
        return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2_3_2_3, -0.1_3_0_4, 0.0_8_1_3, -0.3_0_9_3, -0.0_9_1_9, -0.1_5_7_1, -0.1_1_2_5, -0.5_8_0_6]],
[17, 0.5_5, [-0.0_8_3_1, -0.2_4_4_3, 0.0_9_0_1, -0.0_9_1_9, 0.3_3_9_6, 0.0_1_0_3, -0.3_7_4_3, 0.0_7_0_1]],
[8, 0.8_9, [-0.4_8_6_3, 0.0_8_5_9, 0.0_8_7_5, -0.1_6_5_8, 0.9_1_9_9, -0.0_1_1_4, 0.4_8_3_9, 0.4_6_3_9]],
[3, 10_00, [-0.5_6_4_9, 0.2_4_0_2, -0.5_5_1_8, 0.1_2_4_8, 1.1_3_2_8, -0.2_4_4_3, -0.0_3_2_5, -1.0_0_7_8]],
# fmt: on
] )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16( self , seed , timestep , expected_slice ):
        model, params = self.get_unet_model(model_id='CompVis/stable-diffusion-v1-4' , fp16=True )
        latents = self.get_latents(seed , fp16=True )
        encoder_hidden_states = self.get_encoder_hidden_states(seed , fp16=True )

        sample = model.apply(
            {'params': params} , latents , jnp.array(timestep , dtype=jnp.int32 ) , encoder_hidden_states=encoder_hidden_states , ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.float32 )
        expected_output_slice = jnp.array(expected_slice , dtype=jnp.float32 )

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice , expected_output_slice , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1_5_1_4, 0.0_8_0_7, 0.1_6_2_4, 0.1_0_1_6, -0.1_8_9_6, 0.0_2_6_3, 0.0_6_7_7, 0.2_3_1_0]],
[17, 0.5_5, [0.1_1_6_4, -0.0_2_1_6, 0.0_1_7_0, 0.1_5_8_9, -0.3_1_2_0, 0.1_0_0_5, -0.0_5_8_1, -0.1_4_5_8]],
[8, 0.8_9, [-0.1_7_5_8, -0.0_1_6_9, 0.1_0_0_4, -0.1_4_1_1, 0.1_3_1_2, 0.1_1_0_3, -0.1_9_9_6, 0.2_1_3_9]],
[3, 10_00, [0.1_2_1_4, 0.0_3_5_2, -0.0_7_3_1, -0.1_5_6_2, -0.0_9_9_4, -0.0_9_0_6, -0.2_3_4_0, -0.0_5_3_9]],
# fmt: on
] )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16( self , seed , timestep , expected_slice ):
        model, params = self.get_unet_model(model_id='stabilityai/stable-diffusion-2' , fp16=True )
        latents = self.get_latents(seed , shape=(4, 4, 96, 96) , fp16=True )
        encoder_hidden_states = self.get_encoder_hidden_states(seed , shape=(4, 77, 10_24) , fp16=True )

        sample = model.apply(
            {'params': params} , latents , jnp.array(timestep , dtype=jnp.int32 ) , encoder_hidden_states=encoder_hidden_states , ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.float32 )
        expected_output_slice = jnp.array(expected_slice , dtype=jnp.float32 )

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice , expected_output_slice , atol=1E-2 )
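# Illustrative outline (added; not part of the tests): the core call the tests above
# check. Loading real weights needs network access, so this stays a commented sketch;
# `latents` and `hidden_states` stand in for the arrays the helpers load.
# model, params = FlaxUNet2DConditionModel.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="unet")
# out = model.apply({"params": params}, latents, jnp.array(4), encoder_hidden_states=hidden_states).sample
# out.shape == latents.shape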
| 185 | 0 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2

LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)
def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR , IMAGE_DIR )
    print('''Processing...''' )
    new_images, new_annos, paths = update_image_and_anno(img_paths , annos , FLIP_TYPE )

    for index, image in enumerate(new_images ):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32 )
        file_name = paths[index].split(os.sep )[-1].rsplit('''.''' , 1 )[0]
        file_root = f'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'''
        cv2.imwrite(f'''/{file_root}.jpg''' , image , [cv2.IMWRITE_JPEG_QUALITY, 85] )
        print(f'''Success {index+1}/{len(new_images )} with {file_name}''' )
        annos_list = []
        for anno in new_annos[index]:
            obj = f'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'''
            annos_list.append(obj )
        with open(f'''/{file_root}.txt''' , '''w''' ) as outfile:
            outfile.write('''\n'''.join(line for line in annos_list ) )
def get_dataset( label_dir: str , img_dir: str ) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir , '''*.txt''' ) ):
        label_name = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
        with open(label_file ) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir , f'''{label_name}.jpg''' )

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip('''\n''' ).split(''' ''' )
            boxes.append(
                [
                    int(obj[0] ),
                    float(obj[1] ),
                    float(obj[2] ),
                    float(obj[3] ),
                    float(obj[4] ),
                ] )
        if not boxes:
            continue
        img_paths.append(img_path )
        labels.append(boxes )
    return img_paths, labels
def update_image_and_anno( img_list: list , anno_list: list , flip_type: int = 1 ) -> tuple[list, list, list]:
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list ) ):
        new_annos = []
        path = img_list[idx]
        path_list.append(path )
        img_annos = anno_list[idx]
        img = cv2.imread(path )
        if flip_type == 1:
            new_img = cv2.flip(img , flip_type )
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
        elif flip_type == 0:
            new_img = cv2.flip(img , flip_type )
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
        new_annos_lists.append(new_annos )
        new_imgs_list.append(new_img )
    return new_imgs_list, new_annos_lists, path_list
def random_chars( number_char: int = 32 ) -> str:
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code ) for _ in range(number_char ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 339 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
),
"squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli": (
"https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"squeezebert/squeezebert-uncased": 512,
"squeezebert/squeezebert-mnli": 512,
"squeezebert/squeezebert-mnli-headless": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"squeezebert/squeezebert-uncased": {"do_lower_case": True},
"squeezebert/squeezebert-mnli": {"do_lower_case": True},
"squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}
class SqueezeBertTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
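# Illustrative usage sketch (added; not part of the module). The checkpoint id is one
# of the keys listed above; the call needs network access, so it is guarded.
if __name__ == "__main__":
    tok = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased" )
    print(tok("SqueezeBERT reuses the BERT wordpiece vocabulary." ).input_ids )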
| 339 | 1 |
'''simple docstring'''
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class ProphetNetTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False

    def setUp( self ):
        """simple docstring"""
        super().setUp()

        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
    def get_input_output_texts( self , tokenizer ):
        """simple docstring"""
        input_text = '''UNwant\u00E9d,running'''
        output_text = '''unwanted, running'''
return input_text, output_text
    def test_full_tokenizer( self ):
        """simple docstring"""
        tokenizer = self.tokenizer_class(self.vocab_file )

        tokens = tokenizer.tokenize('UNwant\u00E9d,running' )
        self.assertListEqual(tokens , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [9, 6, 7, 12, 10, 11] )

    def test_chinese( self ):
        """simple docstring"""
        tokenizer = BasicTokenizer()

        self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )

    def test_basic_tokenizer_lower( self ):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True )

        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )

    def test_basic_tokenizer_lower_strip_accents_false( self ):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=False )

        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )

    def test_basic_tokenizer_lower_strip_accents_true( self ):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=True )

        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )

    def test_basic_tokenizer_lower_strip_accents_default( self ):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True )

        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )

    def test_basic_tokenizer_no_lower( self ):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False )

        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )

    def test_basic_tokenizer_no_lower_strip_accents_false( self ):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=False )

        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )

    def test_basic_tokenizer_no_lower_strip_accents_true( self ):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=True )

        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )

    def test_basic_tokenizer_respects_never_split_tokens( self ):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False , never_split=['[UNK]'] )

        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )

    def test_wordpiece_tokenizer( self ):
        """simple docstring"""
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']

        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab , unk_token='[UNK]' )

        self.assertListEqual(tokenizer.tokenize('' ) , [] )
        self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
        self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
@require_torch
    def test_prepare_batch( self ):
        """simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )

        src_text = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        expected_src_tokens = [10_37, 21_46, 2_04_23, 20_05, 76_80, 78_49, 39_89, 10_12, 1_02]
        batch = tokenizer(src_text , padding=True , return_tensors='pt' )
        self.assertIsInstance(batch , BatchEncoding )

        result = list(batch.input_ids.numpy()[0] )

        self.assertListEqual(expected_src_tokens , result )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
    def test_is_whitespace( self ):
        """simple docstring"""
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
    def test_is_control( self ):
        """simple docstring"""
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
    def test_is_punctuation( self ):
        """simple docstring"""
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
@slow
    def test_sequence_builders( self ):
        """simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )

        text = tokenizer.encode('sequence builders' , add_special_tokens=False )
        text_a = tokenizer.encode('multi-sequence build' , add_special_tokens=False )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == text + [1_02]
assert encoded_pair == text + [1_02] + text_a + [1_02]
| 42 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/beit-base-patch16-224-pt22k": (
"https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig( PretrainedConfig ):
    model_type = '''beit'''

    def __init__( self ,vocab_size=81_92 ,hidden_size=7_68 ,num_hidden_layers=12 ,num_attention_heads=12 ,intermediate_size=30_72 ,hidden_act="gelu" ,hidden_dropout_prob=0.0 ,attention_probs_dropout_prob=0.0 ,initializer_range=0.0_2 ,layer_norm_eps=1E-12 ,image_size=2_24 ,patch_size=16 ,num_channels=3 ,use_mask_token=False ,use_absolute_position_embeddings=False ,use_relative_position_bias=False ,use_shared_relative_position_bias=False ,layer_scale_init_value=0.1 ,drop_path_rate=0.1 ,use_mean_pooling=True ,out_indices=[3, 5, 7, 11] ,pool_scales=[1, 2, 3, 6] ,use_auxiliary_head=True ,auxiliary_loss_weight=0.4 ,auxiliary_channels=2_56 ,auxiliary_num_convs=1 ,auxiliary_concat_input=False ,semantic_loss_ignore_index=2_55 ,**kwargs ,) -> None:
        """simple docstring"""
        super().__init__(**kwargs )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class BeitOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse('''1.11''' )

    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ] )

    @property
    def atol_for_validation( self ) -> float:
        """simple docstring"""
        return 1E-4
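# Illustrative outline (added; not part of the module): the ONNX config above reports
# the dynamic input axes used at export time. Direct construction is shown for
# illustration only.
# onnx_config = BeitOnnxConfig(BeitConfig())
# onnx_config.inputs -> OrderedDict([("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"})])
 | 191 | 0 |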
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest( SchedulerCommonTest ):
    """simple docstring"""

    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config( self , **kwargs ):
        config = {
            'num_train_timesteps': 1000,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'variance_type': 'fixed_small',
            'clip_sample': True,
        }

        config.update(**kwargs)
        return config
    def test_timesteps( self ):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas( self ):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end)

    def test_schedules( self ):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type( self ):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample( self ):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding( self ):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , )

    def test_prediction_type( self ):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices( self ):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0_0979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
    def test_full_loop_no_noise( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample , t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3
    def test_full_loop_with_v_prediction( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='v_prediction')
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample , t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
def test_custom_timesteps(self):
    scheduler_class = self.scheduler_classes[0]
    scheduler_config = self.get_scheduler_config()
    scheduler = scheduler_class(**scheduler_config)
    timesteps = [100, 87, 50, 1, 0]
    scheduler.set_timesteps(timesteps=timesteps)
    scheduler_timesteps = scheduler.timesteps
    for i, timestep in enumerate(scheduler_timesteps):
        if i == len(timesteps) - 1:
            expected_prev_t = -1
        else:
            expected_prev_t = timesteps[i + 1]
        prev_t = scheduler.previous_timestep(timestep)
        prev_t = prev_t.item()
        self.assertEqual(prev_t, expected_prev_t)
def test_custom_timesteps_increasing_order(self):
    scheduler_class = self.scheduler_classes[0]
    scheduler_config = self.get_scheduler_config()
    scheduler = scheduler_class(**scheduler_config)
    timesteps = [100, 87, 50, 51, 0]
    with self.assertRaises(ValueError, msg='`custom_timesteps` must be in descending order.'):
        scheduler.set_timesteps(timesteps=timesteps)
def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
    scheduler_class = self.scheduler_classes[0]
    scheduler_config = self.get_scheduler_config()
    scheduler = scheduler_class(**scheduler_config)
    timesteps = [100, 87, 50, 1, 0]
    num_inference_steps = len(timesteps)
    with self.assertRaises(ValueError, msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.'):
        scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
def test_custom_timesteps_too_large(self):
    scheduler_class = self.scheduler_classes[0]
    scheduler_config = self.get_scheduler_config()
    scheduler = scheduler_class(**scheduler_config)
    timesteps = [scheduler.config.num_train_timesteps]
    with self.assertRaises(
        ValueError, msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}'):
        scheduler.set_timesteps(timesteps=timesteps) | 359 |
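# Illustrative sketch (not part of the original test file): the custom-timestep tests
# above exercise `set_timesteps(timesteps=...)` and `previous_timestep`. A minimal,
# hedged usage example, assuming the public diffusers DDPMScheduler API:
#
#   from diffusers import DDPMScheduler
#   scheduler = DDPMScheduler()
#   scheduler.set_timesteps(timesteps=[100, 87, 50, 1, 0])   # must be strictly descending
#   assert scheduler.previous_timestep(50).item() == 1       # next entry in the list
#   assert scheduler.previous_timestep(0).item() == -1       # the final step maps to -1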
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
A : str = {
'''configuration_blip''': [
'''BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlipConfig''',
'''BlipTextConfig''',
'''BlipVisionConfig''',
],
'''processing_blip''': ['''BlipProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[Any] = ['''BlipImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[Any] = [
'''BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlipModel''',
'''BlipPreTrainedModel''',
'''BlipForConditionalGeneration''',
'''BlipForQuestionAnswering''',
'''BlipVisionModel''',
'''BlipTextModel''',
'''BlipForImageTextRetrieval''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Union[str, Any] = [
'''TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBlipModel''',
'''TFBlipPreTrainedModel''',
'''TFBlipForConditionalGeneration''',
'''TFBlipForQuestionAnswering''',
'''TFBlipVisionModel''',
'''TFBlipTextModel''',
'''TFBlipForImageTextRetrieval''',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
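# Illustrative note (not part of the original module): assigning the `_LazyModule` to
# `sys.modules[__name__]` is what defers the heavy torch/TF imports until an attribute
# is first accessed. A minimal sketch of the same pattern for a hypothetical package
# ("foo" names below are placeholders, not real transformers modules):
#
#   import sys
#   from transformers.utils import _LazyModule
#   _import_structure = {"modeling_foo": ["FooModel"]}
#   sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)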
| 227 | 0 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
_snake_case : Dict = datasets.logging.get_logger(__name__)
_snake_case : List[Any] = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n'
_snake_case : Tuple = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n'
_snake_case : List[str] = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n'
def get_coref_infos(key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}
    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0
    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)
    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)
    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)
    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
    if remove_nested:
        logger.info(
            'Number of removed nested coreferring mentions in the key '
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}")
        logger.info(
            'Number of resulting singleton clusters in the key '
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}")
    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            'files, respectively')
    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)
    output_scores = {}
    conll = 0
    conll_subparts_num = 0
    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})
        logger.info(
            name.ljust(10), f"Recall: {recall * 100:.2f}", f" Precision: {precision * 100:.2f}", f" F1: {f1 * 100:.2f}")
    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({'conll_score': conll})
    return output_scores
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith('#'):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
                else:
                    break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Coval(datasets.Metric):
"""simple docstring"""
def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' ) ),
'references': datasets.Sequence(datasets.Value('string' ) ),
} ) , codebase_urls=['https://github.com/ns-moosavi/coval'] , reference_urls=[
'https://github.com/ns-moosavi/coval',
'https://www.aclweb.org/anthology/P16-1060',
'http://www.conll.cemantix.org/2012/data.html',
] , )
def _compute(self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False):
    metrics = [
        ('mentions', evaluator.mentions),
        ('muc', evaluator.muc),
        ('bcub', evaluator.b_cubed),
        ('ceafe', evaluator.ceafe),
        ('lea', evaluator.lea),
    ]
    if min_span:
        has_gold_parse = util.check_gold_parse_annotation(references)
        if not has_gold_parse:
            raise NotImplementedError('References should have gold parse annotation to use \'min_span\'.')
        # util.parse_key_file(key_file)
        # key_file = key_file + ".parsed"
    score = evaluate(
        key_lines=references, sys_lines=predictions, metrics=metrics, NP_only=NP_only, remove_nested=remove_nested, keep_singletons=keep_singletons, min_span=min_span)
    return score
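# Illustrative sketch (not part of the original metric): the `conll_score` computed in
# `evaluate` above is simply the mean of the MUC, B-cubed and CEAFe F1 values, scaled
# to a percentage. A self-contained restatement of that final step:
def _conll_score_sketch(muc_f1, bcub_f1, ceafe_f1):
    # average the three link-based F1 scores and express the result as a percentage
    return (muc_f1 + bcub_f1 + ceafe_f1) / 3 * 100
assert _conll_score_sketch(1.0, 1.0, 1.0) == 100.0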
| 284 |
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
'kwargs, expected', [
({'num_shards': 0, 'max_num_jobs': 1}, []),
({'num_shards': 10, 'max_num_jobs': 1}, [range(10 )]),
({'num_shards': 10, 'max_num_jobs': 10}, [range(i, i + 1) for i in range(10)]),
({'num_shards': 1, 'max_num_jobs': 10}, [range(1 )]),
({'num_shards': 10, 'max_num_jobs': 3}, [range(0, 4 ), range(4, 7 ), range(7, 10 )]),
({'num_shards': 3, 'max_num_jobs': 10}, [range(0, 1 ), range(1, 2 ), range(2, 3 )]),
], )
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
'gen_kwargs, max_num_jobs, expected', [
({'foo': 0}, 10, [{'foo': 0}]),
({'shards': [0, 1, 2, 3]}, 1, [{'shards': [0, 1, 2, 3]}]),
({'shards': [0, 1, 2, 3]}, 4, [{'shards': [0]}, {'shards': [1]}, {'shards': [2]}, {'shards': [3]}]),
({'shards': [0, 1]}, 4, [{'shards': [0]}, {'shards': [1]}]),
({'shards': [0, 1, 2, 3]}, 2, [{'shards': [0, 1]}, {'shards': [2, 3]}]),
], )
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
'gen_kwargs, expected', [
({'foo': 0}, 1),
({'shards': [0]}, 1),
({'shards': [0, 1, 2, 3]}, 4),
({'shards': [0, 1, 2, 3], 'foo': 0}, 4),
({'shards': [0, 1, 2, 3], 'other': (0, 1)}, 4),
({'shards': [0, 1, 2, 3], 'shards2': [0, 1]}, RuntimeError),
], )
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
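# Illustrative sketch (not part of the original tests): the behaviour checked above
# splits `num_shards` into at most `max_num_jobs` contiguous ranges, giving the first
# jobs one extra shard when the division is uneven. A hedged reimplementation:
def _distribute_shards_sketch(num_shards: int, max_num_jobs: int) -> list:
    shards_per_job, extra = divmod(num_shards, max_num_jobs)
    out, start = [], 0
    for job in range(min(num_shards, max_num_jobs)):
        size = shards_per_job + (1 if job < extra else 0)
        out.append(range(start, start + size))  # contiguous slice of shard indices
        start += size
    return out
assert _distribute_shards_sketch(10, 3) == [range(0, 4), range(4, 7), range(7, 10)]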
| 284 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
_a : Tuple = logging.get_logger(__name__)
def make_batched(videos) -> List:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f'Could not make batched video from {videos}')
class _UpperCAmelCase(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BILINEAR, do_center_crop=True, crop_size=None, do_rescale=True, rescale_factor=1 / 255, offset=True, do_normalize=True, image_mean=None, image_std=None, **kwargs):
        '''simple docstring'''
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs):
        '''simple docstring'''
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}')
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image, size, data_format=None, **kwargs):
        '''simple docstring'''
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'Size must have \'height\' and \'width\' as keys. Got {size.keys()}')
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image, scale, offset=True, data_format=None, **kwargs):
        '''simple docstring'''
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image, mean, std, data_format=None, **kwargs):
        '''simple docstring'''
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(self, image, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, offset=None, do_normalize=None, image_mean=None, image_std=None, data_format=ChannelDimension.FIRST):
        '''simple docstring'''
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")
        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image
    def preprocess(self, videos, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, offset=None, do_normalize=None, image_mean=None, image_std=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs):
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        videos = make_batched(videos)
        videos = [
            [
                self._preprocess_image(
                    image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, offset=offset, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format)
                for img in video
            ]
            for video in videos
        ]
        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
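# Illustrative usage sketch (not part of the original file); frame shapes and the
# resulting tensor shape are assumptions based on the defaults above:
#
#   import numpy as np
#   processor = _UpperCAmelCase()  # size={"shortest_edge": 256}, crop_size=224x224
#   video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(8)]
#   batch = processor.preprocess(video, return_tensors="np")
#   print(batch["pixel_values"].shape)  # expected: (1, 8, 3, 224, 224)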
| 352 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : int = logging.get_logger(__name__)
_a : List[str] = {
"""edbeeching/decision-transformer-gym-hopper-medium""": (
"""https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"""
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(self, state_dim=17, act_dim=4, hidden_size=128, max_ep_len=4096, action_tanh=True, vocab_size=1, n_positions=1024, n_layer=3, n_head=1, n_inner=None, activation_function="relu", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True, bos_token_id=50256, eos_token_id=50256, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False, **kwargs):
        '''simple docstring'''
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
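# Illustrative sketch (not part of the original file): instantiating the config and
# reading an attribute through `attribute_map`, which aliases `num_hidden_layers`
# to `n_layer`:
#
#   config = DecisionTransformerConfig(state_dim=17, act_dim=4, hidden_size=128)
#   assert config.num_hidden_layers == config.n_layer == 3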
| 46 | 0 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def setUp(self):
"""simple docstring"""
super().setUp()
SCREAMING_SNAKE_CASE_ : Optional[Any] = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer
def test_convert_token_and_id(self):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = '<pad>'
SCREAMING_SNAKE_CASE_ : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def test_get_vocab(self):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 10_1122 )
def test_vocab_size(self):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 10_1122 )
@require_torch
def test_prepare_batch(self):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
SCREAMING_SNAKE_CASE_ : List[Any] = [0, 57, 3018, 7_0307, 91, 2]
SCREAMING_SNAKE_CASE_ : Any = self.tokenizer(
_SCREAMING_SNAKE_CASE , max_length=len(_SCREAMING_SNAKE_CASE ) , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
SCREAMING_SNAKE_CASE_ : Optional[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def test_rust_and_python_full_tokenizers(self):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE_ : Any = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Dict = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_ : List[str] = 'I was born in 92000, and this is falsé.'
SCREAMING_SNAKE_CASE_ : str = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : str = rust_tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : int = tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[str] = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Any = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_ : List[str] = tokenizer.encode(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : str = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def test_tokenizer_integration(self):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = {'input_ids': [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
SCREAMING_SNAKE_CASE_ : Any = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=_SCREAMING_SNAKE_CASE , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=_SCREAMING_SNAKE_CASE , )
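# Illustrative usage sketch (not part of the original tests), assuming the hub
# checkpoint referenced above is available:
#
#   from transformers import BarthezTokenizer
#   tokenizer = BarthezTokenizer.from_pretrained("moussaKam/mbarthez")
#   ids = tokenizer("Le transformeur est un modèle d'apprentissage profond.").input_ids
#   assert tokenizer.decode(ids, skip_special_tokens=True).startswith("Le transformeur")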
| 253 |
lowerCAmelCase : Optional[Any] = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
lowerCAmelCase : Dict = [{'type': 'code', 'content': INSTALL_CONTENT}]
lowerCAmelCase : int = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 253 | 1 |
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def get_config(checkpoint_url):
    config = SwinaSRConfig()
    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = 'pixelshuffle_aux'
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = 'pixelshuffledirect'
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = 'nearest+conv'
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ''
    return config
def rename_key(name, config):
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection')
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm', 'embeddings.patch_embeddings.layernorm')
    if "layers" in name:
        name = name.replace('layers', 'encoder.stages')
    if "residual_group.blocks" in name:
        name = name.replace('residual_group.blocks', 'layers')
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "attn" in name:
        name = name.replace('attn', 'attention.self')
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    if "q_bias" in name:
        name = name.replace('q_bias', 'query.bias')
    if "k_bias" in name:
        name = name.replace('k_bias', 'key.bias')
    if "v_bias" in name:
        name = name.replace('v_bias', 'value.bias')
    if "cpb_mlp" in name:
        name = name.replace('cpb_mlp', 'continuous_position_bias_mlp')
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj', 'patch_embed.projection')
    if name == "norm.weight":
        name = 'layernorm.weight'
    if name == "norm.bias":
        name = 'layernorm.bias'
    if "conv_first" in name:
        name = name.replace('conv_first', 'first_convolution')
    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace('conv_last', 'final_convolution')
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace('conv_before_upsample.0', 'conv_before_upsample')
            if "upsample.0" in name:
                name = name.replace('upsample.0', 'upsample.convolution_0')
            if "upsample.2" in name:
                name = name.replace('upsample.2', 'upsample.convolution_1')
            name = 'upsample.' + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace('upsample.0.weight', 'upsample.conv.weight')
            name = name.replace('upsample.0.bias', 'upsample.conv.bias')
        else:
            pass
    else:
        name = 'swin2sr.' + name
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split('.')
            stage_num = int(key_split[1])
            block_num = int(key_split[4])
            dim = config.embed_dim
            if "weight" in key:
                orig_state_dict[f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight"] = val[:dim, :]
                orig_state_dict[f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias"] = val[:dim]
                orig_state_dict[f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias"] = val[-dim:]
            pass
        else:
            orig_state_dict[rename_key(key, config)] = val
    return orig_state_dict
def convert_swinasr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    config = get_config(checkpoint_url)
    model = SwinaSRForImageSuperResolution(config)
    model.eval()
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    if len(missing_keys) > 0:
        raise ValueError('Missing keys when converting: {}'.format(missing_keys))
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f"""Unexpected key {key} in state_dict""")
    # verify values
    url = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
    image = Image.open(requests.get(url, stream=True).raw).convert('RGB')
    processor = SwinaSRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values
    image_size = 126 if 'Jpeg' in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size)),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])
    pixel_values = transforms(image).unsqueeze(0)
    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1)
    outputs = model(pixel_values)
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
__lowerCAmelCase = torch.Size([1, 3, 512, 512] )
__lowerCAmelCase = torch.tensor(
[[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
__lowerCAmelCase = torch.Size([1, 3, 1024, 1024] )
__lowerCAmelCase = torch.tensor(
[[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
__lowerCAmelCase = torch.Size([1, 3, 1024, 1024] )
__lowerCAmelCase = torch.tensor(
[[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
__lowerCAmelCase = torch.Size([1, 3, 512, 512] )
__lowerCAmelCase = torch.tensor(
[[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
__lowerCAmelCase = torch.Size([1, 3, 1024, 1024] )
__lowerCAmelCase = torch.tensor(
[[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]] )
assert (
outputs.reconstruction.shape == expected_shape
), F"""Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"""
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], lowerCAmelCase_, atol=1E-3 )
print('Looks ok!' )
url_to_name = {
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
'swin2SR-classical-sr-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
'swin2SR-classical-sr-x4-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
'swin2SR-compressed-sr-x4-48'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
'swin2SR-lightweight-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
),
}
model_name = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCAmelCase_ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(lowerCAmelCase_ )
if push_to_hub:
model.push_to_hub(F"""caidas/{model_name}""" )
processor.push_to_hub(F"""caidas/{model_name}""" )
if __name__ == "__main__":
_snake_case : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth',
type=str,
help='URL of the original Swin2SR checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the converted model to the hub.')
_snake_case : Any = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
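# Illustrative invocation (not part of the original script); the script filename is an
# assumption, while the flags mirror the argparse definitions above:
#
#   python convert_swin2sr_original_to_pytorch.py \
#       --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
#       --pytorch_dump_folder_path ./swin2SR-classical-sr-x2-64 \
#       --push_to_hub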
| 207 |
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def dummy_unet(self):
torch.manual_seed(0 )
__lowerCAmelCase = UNetaDModel(
sample_size=(3_2, 6_4) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=('AttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'AttnUpBlock2D') , )
return model
@property
def dummy_unet_condition(self):
torch.manual_seed(0 )
__lowerCAmelCase = UNetaDConditionModel(
sample_size=(6_4, 3_2) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , cross_attention_dim=1_0 , )
return model
@property
def dummy_vqvae_and_unet(self):
torch.manual_seed(0 )
__lowerCAmelCase = AutoencoderKL(
sample_size=(1_2_8, 6_4) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , )
__lowerCAmelCase = UNetaDModel(
sample_size=(6_4, 3_2) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=('AttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'AttnUpBlock2D') , )
return vqvae, unet
@slow
def test_audio_diffusion(self):
__lowerCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
__lowerCAmelCase = DDPMScheduler()
__lowerCAmelCase = AudioDiffusionPipeline(vqvae=lowerCAmelCase_ , unet=self.dummy_unet , mel=lowerCAmelCase_ , scheduler=lowerCAmelCase_ )
__lowerCAmelCase = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
__lowerCAmelCase = torch.Generator(device=lowerCAmelCase_ ).manual_seed(4_2 )
__lowerCAmelCase = pipe(generator=lowerCAmelCase_ , steps=4 )
__lowerCAmelCase = output.audios[0]
__lowerCAmelCase = output.images[0]
__lowerCAmelCase = torch.Generator(device=lowerCAmelCase_ ).manual_seed(4_2 )
__lowerCAmelCase = pipe(generator=lowerCAmelCase_ , steps=4 , return_dict=lowerCAmelCase_ )
__lowerCAmelCase = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
__lowerCAmelCase = np.frombuffer(image.tobytes() , dtype='uint8' )[:1_0]
__lowerCAmelCase = np.frombuffer(image_from_tuple.tobytes() , dtype='uint8' )[:1_0]
__lowerCAmelCase = np.array([6_9, 2_5_5, 2_5_5, 2_5_5, 0, 0, 7_7, 1_8_1, 1_2, 1_2_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
__lowerCAmelCase = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
__lowerCAmelCase = DDIMScheduler()
__lowerCAmelCase = self.dummy_vqvae_and_unet
__lowerCAmelCase = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=lowerCAmelCase_ , scheduler=lowerCAmelCase_ )
__lowerCAmelCase = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
np.random.seed(0 )
__lowerCAmelCase = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
__lowerCAmelCase = torch.Generator(device=lowerCAmelCase_ ).manual_seed(4_2 )
__lowerCAmelCase = pipe(raw_audio=lowerCAmelCase_ , generator=lowerCAmelCase_ , start_step=5 , steps=1_0 )
__lowerCAmelCase = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
__lowerCAmelCase = np.frombuffer(image.tobytes() , dtype='uint8' )[:1_0]
__lowerCAmelCase = np.array([1_2_0, 1_1_7, 1_1_0, 1_0_9, 1_3_8, 1_6_7, 1_3_8, 1_4_8, 1_3_2, 1_2_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
__lowerCAmelCase = self.dummy_unet_condition
__lowerCAmelCase = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=lowerCAmelCase_ , mel=lowerCAmelCase_ , scheduler=lowerCAmelCase_ )
__lowerCAmelCase = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
np.random.seed(0 )
__lowerCAmelCase = torch.rand((1, 1, 1_0) )
__lowerCAmelCase = pipe(generator=lowerCAmelCase_ , encoding=lowerCAmelCase_ )
__lowerCAmelCase = output.images[0]
__lowerCAmelCase = np.frombuffer(image.tobytes() , dtype='uint8' )[:1_0]
__lowerCAmelCase = np.array([1_0_7, 1_0_3, 1_2_0, 1_2_7, 1_4_2, 1_2_2, 1_1_3, 1_2_2, 9_7, 1_1_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_audio_diffusion(self):
__lowerCAmelCase = torch_device
__lowerCAmelCase = DiffusionPipeline.from_pretrained('teticio/audio-diffusion-ddim-256' )
__lowerCAmelCase = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
__lowerCAmelCase = torch.Generator(device=lowerCAmelCase_ ).manual_seed(4_2 )
__lowerCAmelCase = pipe(generator=lowerCAmelCase_ )
__lowerCAmelCase = output.audios[0]
__lowerCAmelCase = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
__lowerCAmelCase = np.frombuffer(image.tobytes() , dtype='uint8' )[:1_0]
__lowerCAmelCase = np.array([1_5_1, 1_6_7, 1_5_4, 1_4_4, 1_2_2, 1_3_4, 1_2_1, 1_0_5, 7_0, 2_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
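# Illustrative usage sketch (not part of the original tests), assuming the hub
# pipeline used above:
#
#   import torch
#   from diffusers import DiffusionPipeline
#   pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256").to("cuda")
#   output = pipe(generator=torch.Generator(device="cuda").manual_seed(42))
#   audio, image = output.audios[0], output.images[0]  # waveform plus its mel-spectrogram image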
| 207 | 1 |
'''simple docstring'''
def multiplication_table(number: int, number_of_terms: int) -> str:
    '''simple docstring'''
    return "\n".join(
        f"""{number} * {i} = {number * i}""" for i in range(1, number_of_terms + 1))
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
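# Illustrative check (not part of the original file): the function emits one line per
# term, so for (3, 2) the output is exactly two lines.
assert multiplication_table(number=3, number_of_terms=2) == "3 * 1 = 3\n3 * 2 = 6"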
| 1 | """simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class snake_case__ (PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableUnCLIPImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
# image encoding components
__a = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
__a = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=lowerCamelCase , projection_dim=lowerCamelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
__a = StableUnCLIPImageNormalizer(embedding_dim=lowerCamelCase )
__a = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
__a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
__a = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCamelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
__a = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowerCamelCase , layers_per_block=1 , upcast_attention=lowerCamelCase , use_linear_projection=lowerCamelCase , )
torch.manual_seed(0 )
__a = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.0_0085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=lowerCamelCase , steps_offset=1 , )
torch.manual_seed(0 )
__a = AutoencoderKL()
__a = {
# image encoding components
"feature_extractor": feature_extractor,
"image_encoder": image_encoder.eval(),
# image noising components
"image_normalizer": image_normalizer.eval(),
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder.eval(),
"unet": unet.eval(),
"scheduler": scheduler,
"vae": vae.eval(),
}
return components
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
if pil_image:
__a = input_image * 0.5 + 0.5
__a = input_image.clamp(0 , 1 )
__a = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
__a = DiffusionPipeline.numpy_to_pil(lowerCamelCase )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def test_image_embeds_none(self):
__a = "cpu" # ensure determinism for the device-dependent torch.Generator
__a = self.get_dummy_components()
__a = StableUnCLIPImgaImgPipeline(**lowerCamelCase )
__a = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
__a = self.get_dummy_inputs(lowerCamelCase )
inputs.update({"image_embeds": None} )
__a = sd_pipe(**lowerCamelCase ).images
__a = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__a = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def a__ ( self ):
__a = torch_device in ["cpu", "mps"]
self._test_attention_slicing_forward_pass(test_max_difference=lowerCamelCase )
def a__ ( self ):
__a = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=lowerCamelCase )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def a__ ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowerCamelCase )
@slow
@require_torch_gpu
class snake_case__ ( unittest.TestCase ):
def a__ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self ):
__a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
__a = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" )
__a = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__a = torch.Generator(device="cpu" ).manual_seed(0 )
__a = pipe(lowerCamelCase , "anime turtle" , generator=lowerCamelCase , output_type="np" )
__a = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase )
def a__ ( self ):
__a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
__a = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" )
__a = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__a = torch.Generator(device="cpu" ).manual_seed(0 )
__a = pipe(lowerCamelCase , "anime turtle" , generator=lowerCamelCase , output_type="np" )
__a = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase )
def a__ ( self ):
__a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__a = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
__a = pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__a = pipe(
lowerCamelCase , "anime turtle" , num_inference_steps=2 , output_type="np" , )
__a = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
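# Hedged sketch of the memory-measurement pattern the test above verifies; it
# should apply to any diffusers pipeline. `StableUnCLIPImg2ImgPipeline` is the
# upstream spelling of the class this dump renders as StableUnCLIPImgaImgPipeline.
import torch
from diffusers import StableUnCLIPImg2ImgPipeline

pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
    "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
)
pipe.enable_attention_slicing()        # compute attention in chunks
pipe.enable_sequential_cpu_offload()   # keep weights on CPU until each module runs
torch.cuda.reset_peak_memory_stats()
# ... run the pipeline here ...
print(torch.cuda.max_memory_allocated() / 10**9, "GB peak")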
| 261 | 0 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class _lowerCamelCase ( unittest.TestCase ):
def __init__(self , __a , __a=7 , __a=3 , __a=30 , __a=4_00 , __a=True , __a=None , __a=True , __a=[0.5, 0.5, 0.5] , __a=[0.5, 0.5, 0.5] , __a=True , __a=1 / 2_55 , __a=True , ) -> List[str]:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
UpperCamelCase = size if size is not None else {"shortest_edge": 18, "longest_edge": 13_33}
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = num_channels
UpperCamelCase = min_resolution
UpperCamelCase = max_resolution
UpperCamelCase = do_resize
UpperCamelCase = size
UpperCamelCase = do_normalize
UpperCamelCase = image_mean
UpperCamelCase = image_std
UpperCamelCase = do_rescale
UpperCamelCase = rescale_factor
UpperCamelCase = do_pad
def snake_case_ (self ) -> Optional[Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def snake_case_ (self , __a , __a=False ) -> str:
if not batched:
UpperCamelCase = image_inputs[0]
if isinstance(__a , Image.Image ):
UpperCamelCase , UpperCamelCase = image.size
else:
UpperCamelCase , UpperCamelCase = image.shape[1], image.shape[2]
if w < h:
UpperCamelCase = int(self.size["shortest_edge"] * h / w )
UpperCamelCase = self.size["shortest_edge"]
elif w > h:
UpperCamelCase = self.size["shortest_edge"]
UpperCamelCase = int(self.size["shortest_edge"] * w / h )
else:
UpperCamelCase = self.size["shortest_edge"]
UpperCamelCase = self.size["shortest_edge"]
else:
UpperCamelCase = []
for image in image_inputs:
UpperCamelCase , UpperCamelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCamelCase = max(__a , key=lambda __a : item[0] )[0]
UpperCamelCase = max(__a , key=lambda __a : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _lowerCamelCase ( _lowercase , unittest.TestCase ):
UpperCAmelCase_ = DeformableDetrImageProcessor if is_vision_available() else None
def snake_case_ (self ) -> Dict:
UpperCamelCase = DeformableDetrImageProcessingTester(self )
@property
def snake_case_ (self ) -> int:
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case_ (self ) -> Any:
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__a , "image_mean" ) )
self.assertTrue(hasattr(__a , "image_std" ) )
self.assertTrue(hasattr(__a , "do_normalize" ) )
self.assertTrue(hasattr(__a , "do_resize" ) )
self.assertTrue(hasattr(__a , "do_rescale" ) )
self.assertTrue(hasattr(__a , "do_pad" ) )
self.assertTrue(hasattr(__a , "size" ) )
def snake_case_ (self ) -> Optional[Any]:
UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 13_33} )
self.assertEqual(image_processor.do_pad , __a )
UpperCamelCase = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__a )
self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
self.assertEqual(image_processor.do_pad , __a )
def snake_case_ (self ) -> Any:
pass
def snake_case_ (self ) -> int:
# Initialize image_processing
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a )
for image in image_inputs:
self.assertIsInstance(__a , Image.Image )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
UpperCamelCase , UpperCamelCase = self.image_processor_tester.get_expected_values(__a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase , UpperCamelCase = self.image_processor_tester.get_expected_values(__a , batched=__a )
UpperCamelCase = image_processing(__a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case_ (self ) -> Any:
# Initialize image_processing
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , numpify=__a )
for image in image_inputs:
self.assertIsInstance(__a , np.ndarray )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
UpperCamelCase , UpperCamelCase = self.image_processor_tester.get_expected_values(__a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase = image_processing(__a , return_tensors="pt" ).pixel_values
UpperCamelCase , UpperCamelCase = self.image_processor_tester.get_expected_values(__a , batched=__a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case_ (self ) -> Tuple:
# Initialize image_processing
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , torchify=__a )
for image in image_inputs:
self.assertIsInstance(__a , torch.Tensor )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
UpperCamelCase , UpperCamelCase = self.image_processor_tester.get_expected_values(__a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase = image_processing(__a , return_tensors="pt" ).pixel_values
UpperCamelCase , UpperCamelCase = self.image_processor_tester.get_expected_values(__a , batched=__a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def snake_case_ (self ) -> Dict:
# prepare image and target
UpperCamelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
UpperCamelCase = json.loads(f.read() )
UpperCamelCase = {"image_id": 3_97_69, "annotations": target}
# encode them
UpperCamelCase = DeformableDetrImageProcessor()
UpperCamelCase = image_processing(images=__a , annotations=__a , return_tensors="pt" )
# verify pixel values
UpperCamelCase = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , __a )
UpperCamelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __a , atol=1e-4 ) )
# verify area
UpperCamelCase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __a ) )
# verify boxes
UpperCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __a )
UpperCamelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __a , atol=1e-3 ) )
# verify image_id
UpperCamelCase = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __a ) )
# verify is_crowd
UpperCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __a ) )
# verify class_labels
UpperCamelCase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __a ) )
# verify orig_size
UpperCamelCase = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __a ) )
# verify size
UpperCamelCase = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __a ) )
@slow
def snake_case_ (self ) -> List[str]:
# prepare image, target and masks_path
UpperCamelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
UpperCamelCase = json.loads(f.read() )
UpperCamelCase = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target}
UpperCamelCase = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
UpperCamelCase = DeformableDetrImageProcessor(format="coco_panoptic" )
UpperCamelCase = image_processing(images=__a , annotations=__a , masks_path=__a , return_tensors="pt" )
# verify pixel values
UpperCamelCase = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , __a )
UpperCamelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __a , atol=1e-4 ) )
# verify area
UpperCamelCase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __a ) )
# verify boxes
UpperCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __a )
UpperCamelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __a , atol=1e-3 ) )
# verify image_id
UpperCamelCase = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __a ) )
# verify is_crowd
UpperCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __a ) )
# verify class_labels
UpperCamelCase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __a ) )
# verify masks
UpperCamelCase = 82_28_73
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __a )
# verify orig_size
UpperCamelCase = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __a ) )
# verify size
UpperCamelCase = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __a ) )
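# Hedged usage sketch for the image processor exercised above; the checkpoint
# name is an assumption (any Deformable DETR checkpoint should work).
from PIL import Image
from transformers import DeformableDetrImageProcessor

processor = DeformableDetrImageProcessor.from_pretrained("SenseTime/deformable-detr")
inputs = processor(images=Image.new("RGB", (640, 480)), return_tensors="pt")
print(inputs["pixel_values"].shape)  # (1, 3, H, W) after shortest-edge resize and padding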
| 244 |
"""simple docstring"""
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlite3
import sqlalchemy
class _lowerCamelCase ( _lowercase ):
def __init__(self , __a , __a , __a = None , __a = None , __a = False , **__a , ) -> List[Any]:
super().__init__(features=__a , cache_dir=__a , keep_in_memory=__a , **__a )
UpperCamelCase = Sql(
cache_dir=__a , features=__a , sql=__a , con=__a , **__a , )
def snake_case_ (self ) -> List[Any]:
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
self.builder.download_and_prepare(
download_config=__a , download_mode=__a , verification_mode=__a , base_path=__a , )
# Build dataset for splits
UpperCamelCase = self.builder.as_dataset(
split="train" , verification_mode=__a , in_memory=self.keep_in_memory )
return dataset
class _lowerCamelCase :
def __init__(self , __a , __a , __a , __a = None , __a = None , **__a , ) -> Tuple:
if num_proc is not None and num_proc <= 0:
raise ValueError(F"num_proc {num_proc} must be an integer > 0." )
UpperCamelCase = dataset
UpperCamelCase = name
UpperCamelCase = con
UpperCamelCase = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
UpperCamelCase = num_proc
UpperCamelCase = to_sql_kwargs
def snake_case_ (self ) -> int:
UpperCamelCase = self.to_sql_kwargs.pop("sql" , __a )
UpperCamelCase = self.to_sql_kwargs.pop("con" , __a )
UpperCamelCase = self.to_sql_kwargs.pop("index" , __a )
UpperCamelCase = self._write(index=__a , **self.to_sql_kwargs )
return written
def snake_case_ (self , __a ) -> Any:
UpperCamelCase , UpperCamelCase , UpperCamelCase = args
UpperCamelCase = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
UpperCamelCase = query_table(
table=self.dataset.data , key=slice(__a , offset + self.batch_size ) , indices=self.dataset._indices , )
UpperCamelCase = batch.to_pandas()
UpperCamelCase = df.to_sql(self.name , self.con , index=__a , **__a )
return num_rows or len(__a )
def snake_case_ (self , __a , **__a ) -> int:
UpperCamelCase = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
UpperCamelCase , UpperCamelCase = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , __a , __a )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ):
written += num_rows
return written
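# Hedged usage sketch for the reader/writer pair above, assuming they back the
# public `Dataset.from_sql` / `Dataset.to_sql` entry points of the `datasets`
# library; the table and database names are illustrative.
import sqlite3
from datasets import Dataset

ds = Dataset.from_dict({"id": [1, 2], "text": ["a", "b"]})
con = sqlite3.connect("example.db")
ds.to_sql("my_table", con)                                 # SqlDatasetWriter path
loaded = Dataset.from_sql("SELECT * FROM my_table", con)   # SqlDatasetReader path
con.close()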
| 244 | 1 |
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __A:
snake_case_ = 42
snake_case_ = None
# Automatically constructed
snake_case_ = "dict"
snake_case_ = None
snake_case_ = field(default='''Translation''' , init=a , repr=a )
def __call__( self ) -> Tuple:
'''simple docstring'''
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def SCREAMING_SNAKE_CASE_ ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
'''simple docstring'''
from .features import Value
return {k: Value('''string''' ) for k in sorted(self.languages )}
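# Hedged usage sketch (standalone, not part of this module): the class above is
# the `Translation` feature of the `datasets` library, which stores one string
# per fixed language code.
from datasets import Dataset, Features, Translation

features = Features({"translation": Translation(languages=["en", "fr"])})
ds = Dataset.from_dict(
    {"translation": [{"en": "the cat", "fr": "le chat"}]}, features=features
)
print(ds[0]["translation"]["fr"])  # "le chat"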
@dataclass
class __A:
snake_case_ = None
snake_case_ = None
snake_case_ = None
# Automatically constructed
snake_case_ = "dict"
snake_case_ = None
snake_case_ = field(default='''TranslationVariableLanguages''' , init=a , repr=a )
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
'''simple docstring'''
__a = sorted(set(self.languages ) ) if self.languages else None
__a = len(self.languages ) if self.languages else None
def __call__( self ) -> Any:
'''simple docstring'''
return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> str:
'''simple docstring'''
__a = set(self.languages )
if self.languages and set(_snake_case ) - lang_set:
raise ValueError(
F"""Some languages in example ({', '.join(sorted(set(_snake_case ) - lang_set ) )}) are not in valid set ({', '.join(_snake_case )}).""" )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
__a = []
for lang, text in translation_dict.items():
if isinstance(_snake_case , _snake_case ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
__a , __a = zip(*sorted(_snake_case ) )
return {"language": languages, "translation": translations}
def SCREAMING_SNAKE_CASE_ ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
'''simple docstring'''
from .features import Sequence, Value
return {
"language": Sequence(Value('''string''' ) ),
"translation": Sequence(Value('''string''' ) ),
}
| 6 |
"""simple docstring"""
class _A :
"""simple docstring"""
def __init__( self : int , __UpperCAmelCase : int):
a : Tuple = size
a : Dict = [0] * size
a : Optional[int] = [0] * size
@staticmethod
def __snake_case ( __UpperCAmelCase : int):
return index | (index + 1)
@staticmethod
def __snake_case ( __UpperCAmelCase : int):
return (index & (index + 1)) - 1
def __snake_case ( self : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : int):
a : Union[str, Any] = value
while index < self.size:
a : Dict = self.get_prev(__UpperCAmelCase) + 1
if current_left_border == index:
a : Optional[int] = value
else:
a : Any = max(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
a : Optional[int] = self.get_next(__UpperCAmelCase)
def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : int , __UpperCAmelCase : int):
right -= 1 # Because of right is exclusive
a : List[str] = 0
while left <= right:
a : Dict = self.get_prev(__UpperCAmelCase)
if left <= current_left:
a : Optional[int] = max(__UpperCAmelCase , self.tree[right])
a : Optional[Any] = current_left
else:
a : List[str] = max(__UpperCAmelCase , self.arr[right])
right -= 1
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
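# Hedged, cleaned-up rendering of the max-query Fenwick tree above. In the dump
# every method is named `__snake_case`, so the definitions shadow one another;
# the names `MaxFenwickTree`, `update`, and `query` are assumptions, and the
# garbled `max(value, value, value)` line is restored to `max(tree[index], value)`.
class MaxFenwickTree:
    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size   # raw values
        self.tree = [0] * size  # interval maxima

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                self.tree[index] = max(self.tree[index], value)
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        """Maximum over the half-open range [left, right)."""
        right -= 1
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result


tree = MaxFenwickTree(5)
tree.update(2, 7)
assert tree.query(0, 5) == 7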
| 40 | 0 |
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
SCREAMING_SNAKE_CASE__:Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(UpperCamelCase_ )
class snake_case__ ( UpperCamelCase_ ):
def __init__( self , **lowerCamelCase ):
super().__init__(**_a )
if self.framework != "pt":
raise ValueError(F"The {self.__class__} is only available in PyTorch." )
# No specific FOR_XXX available yet
def __call__( self , lowerCamelCase , **lowerCamelCase ):
return super().__call__(_a , **_a )
def a__ ( self , **lowerCamelCase ):
__a = {}
if "candidate_labels" in kwargs:
__a = kwargs["""candidate_labels"""]
if "hypothesis_template" in kwargs:
__a = kwargs["""hypothesis_template"""]
return preprocess_params, {}, {}
def a__ ( self , lowerCamelCase , lowerCamelCase=None , lowerCamelCase="This is a sound of {}." ):
if isinstance(_a , _a ):
if audio.startswith("http://" ) or audio.startswith("https://" ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
__a = requests.get(_a ).content
else:
with open(_a , "rb" ) as f:
__a = f.read()
if isinstance(_a , _a ):
__a = ffmpeg_read(_a , self.feature_extractor.sampling_rate )
if not isinstance(_a , np.ndarray ):
raise ValueError("We expect a numpy ndarray as input" )
if len(audio.shape ) != 1:
raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline" )
__a = self.feature_extractor(
[audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors="pt" )
__a = candidate_labels
__a = [hypothesis_template.format(_a ) for x in candidate_labels]
__a = self.tokenizer(_a , return_tensors=self.framework , padding=_a )
__a = [text_inputs]
return inputs
def a__ ( self , lowerCamelCase ):
__a = model_inputs.pop("candidate_labels" )
__a = model_inputs.pop("text_inputs" )
if isinstance(text_inputs[0] , _a ):
__a = text_inputs[0]
else:
# Batching case.
__a = text_inputs[0][0]
__a = self.model(**_a , **_a )
__a = {
"""candidate_labels""": candidate_labels,
"""logits""": outputs.logits_per_audio,
}
return model_outputs
def a__ ( self , lowerCamelCase ):
__a = model_outputs.pop("candidate_labels" )
__a = model_outputs["""logits"""][0]
if self.framework == "pt":
__a = logits.softmax(dim=0 )
__a = probs.tolist()
else:
raise ValueError("`tf` framework not supported." )
__a = [
{"""score""": score, """label""": candidate_label}
for score, candidate_label in sorted(zip(_a , _a ) , key=lambda lowerCamelCase : -x[0] )
]
return result
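# Hedged usage sketch for the pipeline above via the standard `transformers`
# entry point; the model id and audio file name are illustrative assumptions.
from transformers import pipeline

classifier = pipeline(
    task="zero-shot-audio-classification",
    model="laion/clap-htsat-unfused",
)
preds = classifier(
    "dog.wav",
    candidate_labels=["dog barking", "vacuum cleaner"],
)
print(preds)  # e.g. [{"score": 0.99, "label": "dog barking"}, ...]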
| 362 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
SCREAMING_SNAKE_CASE__:Optional[Any] = None
SCREAMING_SNAKE_CASE__:Tuple = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:int = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE__:Optional[int] = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"""
),
},
}
SCREAMING_SNAKE_CASE__:Dict = {
"""moussaKam/mbarthez""": 1024,
"""moussaKam/barthez""": 1024,
"""moussaKam/barthez-orangesum-title""": 1024,
}
SCREAMING_SNAKE_CASE__:List[str] = """▁"""
class snake_case__ ( snake_case_ ):
_snake_case : str = VOCAB_FILES_NAMES
_snake_case : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_snake_case : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case : Dict = ["""input_ids""", """attention_mask"""]
_snake_case : str = BarthezTokenizer
def __init__( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="<s>" , lowerCamelCase="</s>" , lowerCamelCase="</s>" , lowerCamelCase="<s>" , lowerCamelCase="<unk>" , lowerCamelCase="<pad>" , lowerCamelCase="<mask>" , **lowerCamelCase , ):
# Mask token behave like a normal word, i.e. include the space before it
__a = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else mask_token
super().__init__(
lowerCamelCase , tokenizer_file=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , cls_token=lowerCamelCase , pad_token=lowerCamelCase , mask_token=lowerCamelCase , **lowerCamelCase , )
__a = vocab_file
__a = False if not self.vocab_file else True
def a__ ( self , lowerCamelCase , lowerCamelCase = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__a = [self.cls_token_id]
__a = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def a__ ( self , lowerCamelCase , lowerCamelCase = None ):
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def a__ ( self , lowerCamelCase , lowerCamelCase = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(lowerCamelCase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
__a = os.path.join(
lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ):
copyfile(self.vocab_file , lowerCamelCase )
return (out_vocab_file,)
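# Hedged usage sketch for the fast tokenizer above; the checkpoint name comes
# from the pretrained map earlier in this file.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("moussaKam/barthez")
enc = tok("Le chat dort.", return_tensors="pt")
print(enc["input_ids"])  # <s> ... </s> ids under the BARThez vocabulary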
| 268 | 0 |
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
def A ( self : Optional[int] ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def A ( self : Union[str, Any] ) -> Optional[Any]:
lowercase_ : int = 1
lowercase_ : List[Any] = 3
lowercase_ : List[Any] = (32, 32)
lowercase_ : Optional[int] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(A )
return image
@property
def A ( self : int ) -> Optional[Any]:
torch.manual_seed(0 )
lowercase_ : List[str] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
return model
@property
def A ( self : Optional[int] ) -> Optional[Any]:
torch.manual_seed(0 )
lowercase_ : Dict = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def A ( self : Optional[int] ) -> Optional[int]:
torch.manual_seed(0 )
lowercase_ : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(A )
@property
def A ( self : Optional[Any] ) -> Union[str, Any]:
def extract(*A : int , **A : List[str] ):
class _UpperCAmelCase :
def __init__( self : List[Any] ) -> Tuple:
lowercase_ : Dict = torch.ones([0] )
def A ( self : str , A : Any ) -> str:
self.pixel_values.to(A )
return self
return Out()
return extract
def A ( self : int ) -> List[str]:
lowercase_ : Any = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase_ : Optional[Any] = self.dummy_cond_unet
lowercase_ : Any = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=A , set_alpha_to_one=A , )
lowercase_ : List[str] = self.dummy_vae
lowercase_ : Dict = self.dummy_text_encoder
lowercase_ : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
# make sure here that pndm scheduler skips prk
lowercase_ : List[str] = StableDiffusionPipeline(
unet=A , scheduler=A , vae=A , text_encoder=A , tokenizer=A , safety_checker=A , feature_extractor=self.dummy_extractor , )
lowercase_ : Optional[int] = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
lowercase_ : Union[str, Any] = '''A painting of a squirrel eating a burger'''
lowercase_ : Tuple = torch.Generator(device=A ).manual_seed(0 )
lowercase_ : Tuple = sd_pipe([prompt] , generator=A , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' )
lowercase_ : int = output.images
lowercase_ : Tuple = torch.Generator(device=A ).manual_seed(0 )
lowercase_ : List[str] = sd_pipe(
[prompt] , generator=A , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , return_dict=A , )[0]
lowercase_ : List[Any] = image[0, -3:, -3:, -1]
lowercase_ : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase_ : Tuple = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def A ( self : int ) -> Tuple:
lowercase_ : Any = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase_ : Tuple = self.dummy_cond_unet
lowercase_ : Union[str, Any] = PNDMScheduler(skip_prk_steps=A )
lowercase_ : Dict = self.dummy_vae
lowercase_ : List[Any] = self.dummy_text_encoder
lowercase_ : Optional[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
# make sure here that pndm scheduler skips prk
lowercase_ : int = StableDiffusionPipeline(
unet=A , scheduler=A , vae=A , text_encoder=A , tokenizer=A , safety_checker=A , feature_extractor=self.dummy_extractor , )
lowercase_ : List[Any] = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
lowercase_ : Tuple = '''A painting of a squirrel eating a burger'''
lowercase_ : Union[str, Any] = torch.Generator(device=A ).manual_seed(0 )
lowercase_ : Optional[Any] = sd_pipe([prompt] , generator=A , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' )
lowercase_ : Any = output.images
lowercase_ : List[str] = torch.Generator(device=A ).manual_seed(0 )
lowercase_ : Union[str, Any] = sd_pipe(
[prompt] , generator=A , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , return_dict=A , )[0]
lowercase_ : Dict = image[0, -3:, -3:, -1]
lowercase_ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase_ : Tuple = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def A ( self : Tuple ) -> Any:
lowercase_ : Any = StableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-lms-pipe''' , safety_checker=A )
assert isinstance(A , A )
assert isinstance(pipe.scheduler , A )
assert pipe.safety_checker is None
lowercase_ : str = pipe('''example prompt''' , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(A )
lowercase_ : List[str] = StableDiffusionPipeline.from_pretrained(A )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
lowercase_ : Dict = pipe('''example prompt''' , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def A ( self : Tuple ) -> Any:
lowercase_ : Optional[int] = self.dummy_cond_unet
lowercase_ : Dict = PNDMScheduler(skip_prk_steps=A )
lowercase_ : List[Any] = self.dummy_vae
lowercase_ : Any = self.dummy_text_encoder
lowercase_ : Tuple = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
# put models in fp16
lowercase_ : int = unet.half()
lowercase_ : str = vae.half()
lowercase_ : str = bert.half()
# make sure here that pndm scheduler skips prk
lowercase_ : List[Any] = StableDiffusionPipeline(
unet=A , scheduler=A , vae=A , text_encoder=A , tokenizer=A , safety_checker=A , feature_extractor=self.dummy_extractor , )
lowercase_ : Optional[int] = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
lowercase_ : Optional[int] = '''A painting of a squirrel eating a burger'''
lowercase_ : Union[str, Any] = sd_pipe([prompt] , num_inference_steps=2 , output_type='''np''' ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
def A ( self : Union[str, Any] ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self : Any ) -> Optional[Any]:
lowercase_ : Dict = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' , safety_checker=A )
lowercase_ : Union[str, Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
lowercase_ : Optional[Any] = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
lowercase_ : Optional[int] = (
'''portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'''
''' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'''
''' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'''
''' children from bahnhof zoo, detailed '''
)
lowercase_ : Dict = 40_03_66_03_46
lowercase_ : int = 7
# without safety guidance (sld_guidance_scale = 0)
lowercase_ : Optional[int] = torch.manual_seed(A )
lowercase_ : Optional[int] = sd_pipe(
[prompt] , generator=A , guidance_scale=A , num_inference_steps=50 , output_type='''np''' , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
lowercase_ : Union[str, Any] = output.images
lowercase_ : Tuple = image[0, -3:, -3:, -1]
lowercase_ : Any = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
# without safety guidance (strong configuration)
lowercase_ : Union[str, Any] = torch.manual_seed(A )
lowercase_ : Dict = sd_pipe(
[prompt] , generator=A , guidance_scale=A , num_inference_steps=50 , output_type='''np''' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowercase_ : Optional[int] = output.images
lowercase_ : Dict = image[0, -3:, -3:, -1]
lowercase_ : Optional[int] = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A ( self : Any ) -> List[str]:
lowercase_ : Optional[int] = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' , safety_checker=A )
lowercase_ : int = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
lowercase_ : Dict = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
lowercase_ : Union[str, Any] = '''padme amidala taking a bath artwork, safe for work, no nudity'''
lowercase_ : Tuple = 27_34_97_17_55
lowercase_ : str = 7
lowercase_ : Optional[int] = torch.manual_seed(A )
lowercase_ : List[str] = sd_pipe(
[prompt] , generator=A , guidance_scale=A , num_inference_steps=50 , output_type='''np''' , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
lowercase_ : Optional[int] = output.images
lowercase_ : Optional[int] = image[0, -3:, -3:, -1]
lowercase_ : Optional[Any] = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
lowercase_ : List[str] = torch.manual_seed(A )
lowercase_ : str = sd_pipe(
[prompt] , generator=A , guidance_scale=A , num_inference_steps=50 , output_type='''np''' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowercase_ : int = output.images
lowercase_ : str = image[0, -3:, -3:, -1]
lowercase_ : str = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A ( self : Tuple ) -> Optional[Any]:
lowercase_ : Optional[int] = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' )
lowercase_ : Dict = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
lowercase_ : Dict = (
'''the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'''
''' leyendecker'''
)
lowercase_ : Tuple = 10_44_35_52_34
lowercase_ : int = 12
lowercase_ : Tuple = torch.manual_seed(A )
lowercase_ : List[Any] = sd_pipe(
[prompt] , generator=A , guidance_scale=A , num_inference_steps=50 , output_type='''np''' , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
lowercase_ : List[str] = output.images
lowercase_ : Any = image[0, -3:, -3:, -1]
lowercase_ : List[str] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
lowercase_ : Tuple = torch.manual_seed(A )
lowercase_ : Any = sd_pipe(
[prompt] , generator=A , guidance_scale=A , num_inference_steps=50 , output_type='''np''' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowercase_ : Tuple = output.images
lowercase_ : Optional[Any] = image[0, -3:, -3:, -1]
lowercase_ : Optional[Any] = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
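# Hedged usage sketch for the safe pipeline tested above; the checkpoint and the
# "strong" sld_* configuration are copied from the tests, everything else is
# illustrative.
import torch
from diffusers import StableDiffusionPipelineSafe

pipe = StableDiffusionPipelineSafe.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")
image = pipe(
    "portrait photo of a person",
    sld_guidance_scale=2000,
    sld_warmup_steps=7,
    sld_threshold=0.025,
    sld_momentum_scale=0.5,
    sld_mom_beta=0.7,
).images[0]
image.save("safe_sd.png")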
| 33 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class __UpperCamelCase ( unittest.TestCase ):
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ='''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
lowerCamelCase_ =dict(zip(lowerCAmelCase, range(len(lowerCAmelCase ) ) ) )
lowerCamelCase_ ={
'''unk_token''': '''<unk>''',
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
}
lowerCamelCase_ ={
'''feature_size''': 1,
'''padding_value''': 0.0,
'''sampling_rate''': 16_000,
'''return_attention_mask''': False,
'''do_normalize''': True,
}
lowerCamelCase_ =tempfile.mkdtemp()
lowerCamelCase_ =os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase_ =os.path.join(self.tmpdirname, lowerCAmelCase )
with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowerCAmelCase ) + '''\n''' )
with open(self.feature_extraction_file, '''w''', encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowerCAmelCase ) + '''\n''' )
# load decoder from hub
lowerCamelCase_ ='''hf-internal-testing/ngram-beam-search-decoder'''
def lowercase__ ( self, **lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.add_kwargs_tokens_map.copy()
kwargs.update(lowerCAmelCase )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **lowerCAmelCase )
def lowercase__ ( self, **lowerCAmelCase ):
"""simple docstring"""
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **lowerCAmelCase )
def lowercase__ ( self, **lowerCAmelCase ):
"""simple docstring"""
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_tokenizer()
lowerCamelCase_ =self.get_feature_extractor()
lowerCamelCase_ =self.get_decoder()
lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer, lowerCAmelCase )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor, lowerCAmelCase )
# decoder
self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set, decoder.model_container[decoder._model_key]._unigram_set, )
self.assertIsInstance(processor.decoder, lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha, 5.0 )
self.assertEqual(processor.language_model.beta, 3.0 )
self.assertEqual(processor.language_model.score_boundary, -7.0 )
self.assertEqual(processor.language_model.unk_score_offset, 3 )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(lowerCAmelCase, '''include''' ):
WavaVecaProcessorWithLM(
tokenizer=lowerCAmelCase, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder() )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_feature_extractor()
lowerCamelCase_ =self.get_tokenizer()
lowerCamelCase_ =self.get_decoder()
lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase )
lowerCamelCase_ =floats_list((3, 1_000) )
lowerCamelCase_ =feature_extractor(lowerCAmelCase, return_tensors='''np''' )
lowerCamelCase_ =processor(lowerCAmelCase, return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2 )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_feature_extractor()
lowerCamelCase_ =self.get_tokenizer()
lowerCamelCase_ =self.get_decoder()
lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase )
lowerCamelCase_ ='''This is a test string'''
lowerCamelCase_ =processor(text=lowerCAmelCase )
lowerCamelCase_ =tokenizer(lowerCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key] )
def lowercase__ ( self, lowerCAmelCase=(2, 10, 16), lowerCAmelCase=77 ):
"""simple docstring"""
np.random.seed(lowerCAmelCase )
return np.random.rand(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_feature_extractor()
lowerCamelCase_ =self.get_tokenizer()
lowerCamelCase_ =self.get_decoder()
lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase )
lowerCamelCase_ =self._get_dummy_logits(shape=(10, 16), seed=13 )
lowerCamelCase_ =processor.decode(lowerCAmelCase )
lowerCamelCase_ =decoder.decode_beams(lowerCAmelCase )[0]
self.assertEqual(decoded_decoder[0], decoded_processor.text )
self.assertEqual('''</s> <s> </s>''', decoded_processor.text )
self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.get_feature_extractor()
lowerCamelCase_ =self.get_tokenizer()
lowerCamelCase_ =self.get_decoder()
lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase )
lowerCamelCase_ =self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
lowerCamelCase_ =processor.batch_decode(lowerCAmelCase )
else:
with get_context(lowerCAmelCase ).Pool() as pool:
lowerCamelCase_ =processor.batch_decode(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =list(lowerCAmelCase )
with get_context('''fork''' ).Pool() as p:
lowerCamelCase_ =decoder.decode_beams_batch(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =[], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(lowerCAmelCase, decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''], decoded_processor.text )
self.assertListEqual(lowerCAmelCase, decoded_processor.logit_score )
self.assertListEqual(lowerCAmelCase, decoded_processor.lm_score )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_feature_extractor()
lowerCamelCase_ =self.get_tokenizer()
lowerCamelCase_ =self.get_decoder()
lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase )
lowerCamelCase_ =self._get_dummy_logits()
lowerCamelCase_ =15
lowerCamelCase_ =-2_0.0
lowerCamelCase_ =-4.0
lowerCamelCase_ =processor.batch_decode(
lowerCAmelCase, beam_width=lowerCAmelCase, beam_prune_logp=lowerCAmelCase, token_min_logp=lowerCAmelCase, )
lowerCamelCase_ =decoded_processor_out.text
lowerCamelCase_ =list(lowerCAmelCase )
with get_context('''fork''' ).Pool() as pool:
lowerCamelCase_ =decoder.decode_beams_batch(
lowerCAmelCase, lowerCAmelCase, beam_width=lowerCAmelCase, beam_prune_logp=lowerCAmelCase, token_min_logp=lowerCAmelCase, )
lowerCamelCase_ =[d[0][0] for d in decoded_decoder_out]
lowerCamelCase_ =[d[0][2] for d in decoded_decoder_out]
lowerCamelCase_ =[d[0][3] for d in decoded_decoder_out]
self.assertListEqual(lowerCAmelCase, lowerCAmelCase )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''], lowerCAmelCase )
self.assertTrue(np.array_equal(lowerCAmelCase, decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7], lowerCAmelCase, atol=1e-3 ) )
self.assertTrue(np.array_equal(lowerCAmelCase, decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4], lowerCAmelCase, atol=1e-3 ) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_feature_extractor()
lowerCamelCase_ =self.get_tokenizer()
lowerCamelCase_ =self.get_decoder()
lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase )
lowerCamelCase_ =self._get_dummy_logits()
lowerCamelCase_ =2.0
lowerCamelCase_ =5.0
lowerCamelCase_ =-2_0.0
lowerCamelCase_ =True
lowerCamelCase_ =processor.batch_decode(
lowerCAmelCase, alpha=lowerCAmelCase, beta=lowerCAmelCase, unk_score_offset=lowerCAmelCase, lm_score_boundary=lowerCAmelCase, )
lowerCamelCase_ =decoded_processor_out.text
lowerCamelCase_ =list(lowerCAmelCase )
decoder.reset_params(
alpha=lowerCAmelCase, beta=lowerCAmelCase, unk_score_offset=lowerCAmelCase, lm_score_boundary=lowerCAmelCase, )
with get_context('''fork''' ).Pool() as pool:
lowerCamelCase_ =decoder.decode_beams_batch(
lowerCAmelCase, lowerCAmelCase, )
lowerCamelCase_ =[d[0][0] for d in decoded_decoder_out]
self.assertListEqual(lowerCAmelCase, lowerCAmelCase )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''], lowerCAmelCase )
lowerCamelCase_ =processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha, 2.0 )
self.assertEqual(lm_model.beta, 5.0 )
self.assertEqual(lm_model.unk_score_offset, -2_0.0 )
self.assertEqual(lm_model.score_boundary, lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowerCamelCase_ =processor.decoder.model_container[processor.decoder._model_key]
lowerCamelCase_ =Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
lowerCamelCase_ =os.listdir(lowerCAmelCase )
lowerCamelCase_ =['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(lowerCAmelCase, lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =snapshot_download('''hf-internal-testing/processor_with_lm''' )
lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained(lowerCAmelCase )
lowerCamelCase_ =processor.decoder.model_container[processor.decoder._model_key]
lowerCamelCase_ =Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
lowerCamelCase_ =os.listdir(lowerCAmelCase )
lowerCamelCase_ =os.listdir(lowerCAmelCase )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both the decoder from the hub and the local files in cache are the same
self.assertListEqual(lowerCAmelCase, lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowerCamelCase_ =AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowerCamelCase_ =floats_list((3, 1_000) )
lowerCamelCase_ =processor_wavaveca(lowerCAmelCase, return_tensors='''np''' )
lowerCamelCase_ =processor_auto(lowerCAmelCase, return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum(), input_auto[key].sum(), delta=1e-2 )
lowerCamelCase_ =self._get_dummy_logits()
lowerCamelCase_ =processor_wavaveca.batch_decode(lowerCAmelCase )
lowerCamelCase_ =processor_auto.batch_decode(lowerCAmelCase )
self.assertListEqual(decoded_wavaveca.text, decoded_auto.text )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_feature_extractor()
lowerCamelCase_ =self.get_tokenizer()
lowerCamelCase_ =self.get_decoder()
lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase )
self.assertListEqual(
processor.model_input_names, feature_extractor.model_input_names, msg='''`processor` and `feature_extractor` model input names do not match''', )
@staticmethod
def lowercase__ ( lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =[d[key] for d in offsets]
return retrieved_list
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowerCamelCase_ =self._get_dummy_logits()[0]
lowerCamelCase_ =processor.decode(lowerCAmelCase, output_word_offsets=lowerCAmelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ), 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(lowerCAmelCase, lowerCAmelCase ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''], '''word''' ) ), outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''], '''word''' ), ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''], '''start_offset''' ), [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''], '''end_offset''' ), [1, 3, 5] )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowerCamelCase_ =self._get_dummy_logits()
lowerCamelCase_ =processor.batch_decode(lowerCAmelCase, output_word_offsets=lowerCAmelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ), 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(lowerCAmelCase, lowerCAmelCase ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(lowerCAmelCase, '''word''' ) ) for o in outputs['''word_offsets''']], outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0], '''word''' ), ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0], '''start_offset''' ), [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0], '''end_offset''' ), [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def lowercase__ ( self ):
"""simple docstring"""
import torch
lowerCamelCase_ =load_dataset('''common_voice''', '''en''', split='''train''', streaming=lowerCAmelCase )
lowerCamelCase_ =ds.cast_column('''audio''', datasets.Audio(sampling_rate=16_000 ) )
lowerCamelCase_ =iter(lowerCAmelCase )
lowerCamelCase_ =next(lowerCAmelCase )
lowerCamelCase_ =AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
lowerCamelCase_ =WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
lowerCamelCase_ =processor(sample['''audio''']['''array'''], return_tensors='''pt''' ).input_values
with torch.no_grad():
lowerCamelCase_ =model(lowerCAmelCase ).logits.cpu().numpy()
lowerCamelCase_ =processor.decode(logits[0], output_word_offsets=lowerCAmelCase )
lowerCamelCase_ =model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
lowerCamelCase_ =[
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
lowerCamelCase_ ='''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(lowerCAmelCase, '''word''' ) ), lowerCAmelCase )
self.assertEqual(''' '''.join(self.get_from_offsets(lowerCAmelCase, '''word''' ) ), output.text )
# output times
lowerCamelCase_ =torch.tensor(self.get_from_offsets(lowerCAmelCase, '''start_time''' ) )
lowerCamelCase_ =torch.tensor(self.get_from_offsets(lowerCAmelCase, '''end_time''' ) )
# fmt: off
lowerCamelCase_ =torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
lowerCamelCase_ =torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
# fmt: on
self.assertTrue(torch.allclose(lowerCAmelCase, lowerCAmelCase, atol=0.0_1 ) )
self.assertTrue(torch.allclose(lowerCAmelCase, lowerCAmelCase, atol=0.0_1 ) )
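# The timestamp assertions above rest on one conversion: CTC word offsets are
# counted in logit frames, and seconds-per-frame equals inputs_to_logits_ratio /
# sampling_rate. A minimal sketch of that conversion; the ratio of 320 and the
# offset dicts below are assumed illustrative values, not taken from the test.
inputs_to_logits_ratio = 320        # audio samples per logit frame (wav2vec2-base)
sampling_rate = 16_000              # Hz
time_offset = inputs_to_logits_ratio / sampling_rate   # 0.02 s per frame
word_offsets = [                    # hypothetical decoder output
    {"word": "HELLO", "start_offset": 12, "end_offset": 20},
    {"word": "WORLD", "start_offset": 24, "end_offset": 33},
]
word_times = [
    {"word": d["word"],
     "start_time": d["start_offset"] * time_offset,
     "end_time": d["end_offset"] * time_offset}
    for d in word_offsets
]
print(word_times)                   # HELLO starts at 12 * 0.02 = 0.24 s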
| 75 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
UpperCAmelCase: Tuple = logging.get_logger(__name__)
class UpperCamelCase ( snake_case ):
"""simple docstring"""
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
warnings.warn(
"""The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use DeiTImageProcessor instead.""" ,UpperCAmelCase_ ,)
super().__init__(*UpperCAmelCase_ ,**UpperCAmelCase_ )
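# The shim above follows the standard deprecation-by-subclass pattern: warn in
# __init__, then forward everything to the replacement. A generic, runnable
# sketch of the same idea -- the class names here are illustrative, not from
# this snippet.
import warnings

class NewProcessor:
    def __init__(self, *args, **kwargs):
        self.args, self.kwargs = args, kwargs

class OldFeatureExtractor(NewProcessor):
    """Deprecated alias kept so existing imports keep working."""
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated and will be removed; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

OldFeatureExtractor()               # emits a FutureWarning, then behaves as NewProcessor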
| 336 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self ):
_lowercase : str = tempfile.mkdtemp()
# fmt: off
_lowercase : List[Any] = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
_lowercase : Optional[int] = dict(zip(UpperCAmelCase_ ,range(len(UpperCAmelCase_ ) ) ) )
_lowercase : Optional[int] = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
_lowercase : Optional[int] = {"""unk_token""": """<unk>"""}
_lowercase : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
_lowercase : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCAmelCase_ ) + """\n""" )
with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(UpperCAmelCase_ ) )
_lowercase : Dict = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.48145466, 0.4578275, 0.40821073],
"""image_std""": [0.26862954, 0.26130258, 0.27577711],
}
_lowercase : List[Any] = os.path.join(self.tmpdirname ,UpperCAmelCase_ )
with open(self.image_processor_file ,"""w""" ,encoding="""utf-8""" ) as fp:
json.dump(UpperCAmelCase_ ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ,**UpperCAmelCase_ ):
return CLIPTokenizer.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,**UpperCAmelCase_ ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,**UpperCAmelCase_ ):
return CLIPImageProcessor.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
shutil.rmtree(self.tmpdirname )
def lowerCamelCase__ ( self ):
_lowercase : Optional[Any] = [np.random.randint(2_55 ,size=(3, 30, 4_00) ,dtype=np.uinta )]
_lowercase : Tuple = [Image.fromarray(np.moveaxis(UpperCAmelCase_ ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def lowerCamelCase__ ( self ):
_lowercase : Union[str, Any] = self.get_tokenizer()
_lowercase : List[Any] = self.get_rust_tokenizer()
_lowercase : List[Any] = self.get_image_processor()
_lowercase : List[str] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
processor_slow.save_pretrained(self.tmpdirname )
_lowercase : Optional[int] = CLIPProcessor.from_pretrained(self.tmpdirname ,use_fast=UpperCAmelCase_ )
_lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
processor_fast.save_pretrained(self.tmpdirname )
_lowercase : List[str] = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer ,UpperCAmelCase_ )
self.assertIsInstance(processor_fast.tokenizer ,UpperCAmelCase_ )
self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor ,UpperCAmelCase_ )
self.assertIsInstance(processor_fast.image_processor ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
_lowercase : str = CLIPProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_lowercase : Union[str, Any] = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" )
_lowercase : Optional[int] = self.get_image_processor(do_normalize=UpperCAmelCase_ ,padding_value=1.0 )
_lowercase : int = CLIPProcessor.from_pretrained(
self.tmpdirname ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,do_normalize=UpperCAmelCase_ ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,UpperCAmelCase_ )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
_lowercase : Optional[Any] = self.get_image_processor()
_lowercase : Optional[int] = self.get_tokenizer()
_lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
_lowercase : int = self.prepare_image_inputs()
_lowercase : str = image_processor(UpperCAmelCase_ ,return_tensors="""np""" )
_lowercase : int = processor(images=UpperCAmelCase_ ,return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def lowerCamelCase__ ( self ):
_lowercase : Optional[Any] = self.get_image_processor()
_lowercase : Optional[Any] = self.get_tokenizer()
_lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
_lowercase : List[Any] = """lower newer"""
_lowercase : Any = processor(text=UpperCAmelCase_ )
_lowercase : Union[str, Any] = tokenizer(UpperCAmelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def lowerCamelCase__ ( self ):
_lowercase : Union[str, Any] = self.get_image_processor()
_lowercase : List[Any] = self.get_tokenizer()
_lowercase : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
_lowercase : str = """lower newer"""
_lowercase : List[Any] = self.prepare_image_inputs()
_lowercase : Optional[int] = processor(text=UpperCAmelCase_ ,images=UpperCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) ,["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase_ ):
processor()
def lowerCamelCase__ ( self ):
_lowercase : Dict = self.get_image_processor()
_lowercase : List[Any] = self.get_tokenizer()
_lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
_lowercase : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowercase : int = processor.batch_decode(UpperCAmelCase_ )
_lowercase : Tuple = tokenizer.batch_decode(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
_lowercase : Optional[Any] = self.get_image_processor()
_lowercase : List[Any] = self.get_tokenizer()
_lowercase : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
_lowercase : Optional[Any] = """lower newer"""
_lowercase : Any = self.prepare_image_inputs()
_lowercase : Optional[int] = processor(text=UpperCAmelCase_ ,images=UpperCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) ,processor.model_input_names )
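# prepare_image_inputs above hinges on one conversion: PIL wants channel-last
# (H, W, C) uint8 arrays, while the tester builds channel-first (C, H, W) ones.
# A standalone sketch of that conversion (the shape is an arbitrary choice):
import numpy as np
from PIL import Image

arr = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)  # (C, H, W)
img = Image.fromarray(np.moveaxis(arr, 0, -1))                   # -> (H, W, C)
print(img.size)                     # (400, 30): PIL reports (width, height)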
| 336 | 1 |
"""simple docstring"""
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
A_ = logging.getLogger(__name__)
A_ = '''pytorch_model.bin'''
@dataclasses.dataclass
class lowercase:
'''simple docstring'''
lowercase__ = dataclasses.field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."} )
lowercase__ = dataclasses.field(
default=__a , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."} , )
@dataclasses.dataclass
class lowercase:
'''simple docstring'''
lowercase__ = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."} )
lowercase__ = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."} )
lowercase__ = dataclasses.field(
default=__a , metadata={"help": "A csv or a json file containing the validation data."} )
lowercase__ = dataclasses.field(
default=__a , metadata={"help": "The name of the task to train on."} , )
lowercase__ = dataclasses.field(
default=__a , metadata={"help": "The list of labels for the task."} )
@dataclasses.dataclass
class lowercase:
'''simple docstring'''
lowercase__ = dataclasses.field(
metadata={"help": "The output directory where the model predictions and checkpoints will be written."} )
lowercase__ = dataclasses.field(
default="accuracy" , metadata={"help": "The evaluation metric used for the task."} )
lowercase__ = dataclasses.field(
default="no" , metadata={
"help": "The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]"
} , )
lowercase__ = dataclasses.field(
default=10 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , )
lowercase__ = dataclasses.field(
default=0.0 , metadata={
"help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
} , )
lowercase__ = dataclasses.field(
default=__a , metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."} , )
lowercase__ = dataclasses.field(
default=__a , metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."} , )
lowercase__ = dataclasses.field(
default=__a , metadata={"help": "Whether to fine-tune on labeled data after pseudo training."} , )
lowercase__ = dataclasses.field(
default=0.0 , metadata={"help": "Confidence threshold for pseudo-labeled data filtering."} , )
lowercase__ = dataclasses.field(
default=1_00 , metadata={"help": "Maximum number of self-training iterations."} , )
lowercase__ = dataclasses.field(
default=__a , metadata={"help": "Random seed for initialization."} , )
def UpperCAmelCase__ (snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : str , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : Optional[int] ):
"""simple docstring"""
_snake_case : int = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
_snake_case : str = dataset.filter(lambda snake_case__ : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
_snake_case : Optional[Any] = int(eval_result * len(snake_case__ ) )
print(snake_case__ )
_snake_case : Union[str, Any] = dataset.sort("""probability""" , reverse=snake_case__ )
_snake_case : int = dataset.select(range(snake_case__ ) )
_snake_case : Dict = dataset.remove_columns(["""label""", """probability"""] )
_snake_case : int = dataset.rename_column("""prediction""" , """label""" )
_snake_case : Dict = dataset.map(lambda snake_case__ : {"label": idalabel[example["label"]]} )
_snake_case : Optional[int] = dataset.shuffle(seed=args.seed )
_snake_case : List[Any] = os.path.join(snake_case__ , F"train_pseudo.{args.data_file_extension}" )
if args.data_file_extension == "csv":
dataset.to_csv(snake_case__ , index=snake_case__ )
else:
dataset.to_json(snake_case__ )
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Dict , snake_case__ : int , snake_case__ : List[Any] , **snake_case__ : int ):
"""simple docstring"""
_snake_case : Tuple = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
_snake_case : List[str] = STModelArguments(model_name_or_path=snake_case__ )
_snake_case : Union[str, Any] = STDataArguments(train_file=snake_case__ , infer_file=snake_case__ )
_snake_case : List[Any] = STTrainingArguments(output_dir=snake_case__ )
_snake_case : int = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(snake_case__ ).items():
setattr(snake_case__ , snake_case__ , snake_case__ )
for key, value in kwargs.items():
if hasattr(snake_case__ , snake_case__ ):
setattr(snake_case__ , snake_case__ , snake_case__ )
# Sanity checks
_snake_case : str = {}
_snake_case : int = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
_snake_case : Any = args.train_file
_snake_case : List[str] = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
_snake_case : Tuple = args.eval_file
for key in data_files:
_snake_case : Tuple = data_files[key].split(""".""" )[-1]
assert extension in ["csv", "json"], F"`{key}_file` should be a csv or a json file."
if args.data_file_extension is None:
_snake_case : Tuple = extension
else:
assert extension == args.data_file_extension, F"`{key}_file` should be a {args.data_file_extension} file."
assert (
args.eval_metric in datasets.list_metrics()
), F"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info("""Creating the initial data directory for self-training...""" )
_snake_case : Any = F"{args.output_dir}/self-train_iter-{{}}".format
_snake_case : Optional[int] = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=snake_case__ )
os.makedirs(snake_case__ , exist_ok=snake_case__ )
accelerator.wait_for_everyone()
_snake_case : str = None
_snake_case : Union[str, Any] = None
_snake_case : Optional[int] = 0
_snake_case : Optional[Any] = False
# Show the progress bar
_snake_case : Optional[Any] = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
_snake_case : List[str] = data_dir_format(snake_case__ )
assert os.path.exists(snake_case__ )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
_snake_case : Optional[int] = os.path.join(snake_case__ , """stage-1""" )
_snake_case : Tuple = {
"""accelerator""": accelerator,
"""model_name_or_path""": args.model_name_or_path,
"""cache_dir""": args.cache_dir,
"""do_train""": True,
"""train_file""": data_files["""train"""] if iteration == 0 else data_files["""train_pseudo"""],
"""do_eval""": True if args.eval_file is not None else False,
"""eval_file""": data_files["""eval"""],
"""do_predict""": True,
"""infer_file""": data_files["""infer"""],
"""task_name""": args.task_name,
"""label_list""": args.label_list,
"""output_dir""": current_output_dir,
"""eval_metric""": args.eval_metric,
"""evaluation_strategy""": args.evaluation_strategy,
"""early_stopping_patience""": args.early_stopping_patience,
"""early_stopping_threshold""": args.early_stopping_threshold,
"""seed""": args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(snake_case__ , snake_case__ ):
arguments_dict.update({key: value} )
_snake_case : List[str] = os.path.join(snake_case__ , """best-checkpoint""" , snake_case__ )
if os.path.exists(snake_case__ ):
logger.info(
"""Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.""" , snake_case__ , snake_case__ , )
else:
logger.info("""***** Running self-training: iteration: %d, stage: 1 *****""" , snake_case__ )
finetune(**snake_case__ )
accelerator.wait_for_everyone()
assert os.path.exists(snake_case__ )
logger.info("""Self-training job completed: iteration: %d, stage: 1.""" , snake_case__ )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
_snake_case : Any = os.path.join(snake_case__ , """best-checkpoint""" )
_snake_case : List[str] = os.path.join(snake_case__ , """stage-2""" )
# Update arguments_dict
_snake_case : Union[str, Any] = model_path
_snake_case : Union[str, Any] = data_files["""train"""]
_snake_case : Union[str, Any] = current_output_dir
_snake_case : Dict = os.path.join(snake_case__ , """best-checkpoint""" , snake_case__ )
if os.path.exists(snake_case__ ):
logger.info(
"""Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.""" , snake_case__ , snake_case__ , )
else:
logger.info("""***** Running self-training: iteration: %d, stage: 2 *****""" , snake_case__ )
finetune(**snake_case__ )
accelerator.wait_for_everyone()
assert os.path.exists(snake_case__ )
logger.info("""Self-training job completed: iteration: %d, stage: 2.""" , snake_case__ )
_snake_case : Any = iteration
_snake_case : Any = data_dir_format(iteration + 1 )
_snake_case : Dict = AutoConfig.from_pretrained(os.path.join(snake_case__ , """best-checkpoint""" ) )
_snake_case : List[Any] = config.idalabel
_snake_case : Optional[Any] = os.path.join(snake_case__ , """eval_results_best-checkpoint.json""" )
_snake_case : int = os.path.join(snake_case__ , """test_results_best-checkpoint.json""" )
assert os.path.exists(snake_case__ )
with open(snake_case__ , """r""" ) as f:
_snake_case : Any = float(json.load(snake_case__ )[args.eval_metric] )
_snake_case : List[str] = os.path.join(snake_case__ , """infer_output_best-checkpoint.csv""" )
assert os.path.exists(snake_case__ )
# Loading the dataset from local csv or json files.
_snake_case : List[str] = load_dataset(args.data_file_extension , data_files={"""data""": data_files["""infer"""]} )["""data"""]
_snake_case : Optional[Any] = load_dataset("""csv""" , data_files={"""data""": infer_output_file} )["""data"""]
if accelerator.is_main_process:
os.makedirs(snake_case__ , exist_ok=snake_case__ )
shutil.copy(snake_case__ , os.path.join(snake_case__ , F"eval_results_iter-{iteration}.json" ) )
if os.path.exists(snake_case__ ):
shutil.copy(snake_case__ , os.path.join(snake_case__ , F"test_results_iter-{iteration}.json" ) )
create_pseudo_labeled_data(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
accelerator.wait_for_everyone()
_snake_case : Any = os.path.join(snake_case__ , F"train_pseudo.{args.data_file_extension}" )
if args.evaluation_strategy != IntervalStrategy.NO.value:
_snake_case : Union[str, Any] = eval_result
if best_iteration is None:
_snake_case : List[Any] = new_iteration
_snake_case : List[Any] = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
_snake_case : Dict = new_iteration
_snake_case : List[str] = new_eval_result
_snake_case : Dict = 0
else:
if new_eval_result == best_eval_result:
_snake_case : Union[str, Any] = new_iteration
_snake_case : int = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
_snake_case : str = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info("""Best iteration: %d""" , snake_case__ )
logger.info("""Best evaluation result: %s = %f""" , args.eval_metric , snake_case__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(snake_case__ , F"eval_results_iter-{iteration}.json" ) , os.path.join(snake_case__ , """eval_results_best-iteration.json""" ) , )
else:
# Assume that the last iteration is the best
logger.info("""Best iteration: %d""" , args.max_selftrain_iterations - 1 )
logger.info("""Best evaluation result: %s = %f""" , args.eval_metric , snake_case__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(snake_case__ , F"eval_results_iter-{args.max_selftrain_iterations - 1}.json" ) , os.path.join(snake_case__ , """eval_results_best-iteration.json""" ) , )
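# The core of create_pseudo_labeled_data above is a confidence gate: a model
# prediction is kept as a training label only if its probability clears a
# threshold. A minimal sketch with plain dicts instead of a datasets.Dataset;
# the example records and the 0.8 threshold are assumed for illustration.
predictions = [
    {"text": "great movie", "prediction": 1, "probability": 0.97},
    {"text": "it was fine", "prediction": 1, "probability": 0.62},
    {"text": "terrible",    "prediction": 0, "probability": 0.91},
]
confidence_threshold = 0.8
pseudo_labeled = [
    {"text": ex["text"], "label": ex["prediction"]}
    for ex in predictions
    if ex["probability"] > confidence_threshold
]
print(pseudo_labeled)               # only the 0.97 and 0.91 examples survive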
| 64 |
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
__lowerCAmelCase : Union[str, Any] ={
'<': operator.lt,
'<=': operator.le,
'==': operator.eq,
'!=': operator.ne,
'>=': operator.ge,
'>': operator.gt,
}
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
if got_ver is None or want_ver is None:
raise ValueError(
F'''Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider'''
F''' reinstalling {pkg}.''' )
if not ops[op](version.parse(lowercase__ ) , version.parse(lowercase__ ) ):
raise ImportError(
F'''{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}''' )
def _UpperCamelCase ( lowercase__ , lowercase__ = None ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = F'''\n{hint}''' if hint is not None else ''''''
# non-versioned check
if re.match(R'''^[\w_\-\d]+$''' , lowercase__ ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = requirement, None, None
else:
__SCREAMING_SNAKE_CASE : List[Any] = re.findall(R'''^([^!=<>\s]+)([\s!=<>]{1,2}.+)''' , lowercase__ )
if not match:
raise ValueError(
'''requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but'''
F''' got {requirement}''' )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Tuple = match[0]
__SCREAMING_SNAKE_CASE : Optional[int] = want_full.split(''',''' ) # there could be multiple requirements
__SCREAMING_SNAKE_CASE : Optional[Any] = {}
for w in want_range:
__SCREAMING_SNAKE_CASE : Any = re.findall(R'''^([\s!=<>]{1,2})(.+)''' , lowercase__ )
if not match:
raise ValueError(
'''requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,'''
F''' but got {requirement}''' )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Tuple = match[0]
__SCREAMING_SNAKE_CASE : List[Any] = want_ver
if op not in ops:
raise ValueError(F'''{requirement}: need one of {list(ops.keys() )}, but got {op}''' )
# special case
if pkg == "python":
__SCREAMING_SNAKE_CASE : Optional[Any] = '''.'''.join([str(x) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
return
# check if any version is installed
try:
__SCREAMING_SNAKE_CASE : Optional[int] = importlib.metadata.version(lowercase__ )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
F'''The \'{requirement}\' distribution was not found and is required by this application. {hint}''' )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'''
return require_version(lowercase__ , lowercase__ )
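# require_version above splits a pip-style requirement into (package, {op: version})
# and dispatches comparisons through the operator table. A condensed, runnable
# sketch of that parsing and checking step, using the same regexes:
import operator
import re
from packaging import version

OPS = {"<": operator.lt, "<=": operator.le, "==": operator.eq,
       "!=": operator.ne, ">=": operator.ge, ">": operator.gt}

def parse_requirement(requirement):
    pkg, want_full = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)[0]
    wanted = {}
    for clause in want_full.split(","):             # multiple clauses allowed
        op, want_ver = re.findall(r"^([\s!=<>]{1,2})(.+)", clause)[0]
        wanted[op.strip()] = want_ver
    return pkg, wanted

pkg, wanted = parse_requirement("tokenizers>=0.11,!=0.12")
got = version.parse("0.13.2")                       # hypothetical installed version
print(pkg, all(OPS[op](got, version.parse(w)) for op, w in wanted.items()))
# tokenizers True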
| 9 | 0 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __UpperCAmelCase :
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=None , ) -> Optional[int]:
A_ = parent
A_ = batch_size
A_ = image_size
A_ = patch_size
A_ = num_channels
A_ = is_training
A_ = use_labels
A_ = hidden_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = type_sequence_label_size
A_ = initializer_range
A_ = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
A_ = (image_size // patch_size) ** 2
A_ = num_patches + 1
def __A ( self ) -> int:
A_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ = self.get_config()
return config, pixel_values, labels
def __A ( self ) -> List[str]:
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
A_ = TFViTModel(config=_SCREAMING_SNAKE_CASE )
A_ = model(_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image with different size than the one specified in config.
A_ = self.image_size // 2
A_ = pixel_values[:, :, :image_size, :image_size]
A_ = model(_SCREAMING_SNAKE_CASE , interpolate_pos_encoding=_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE )
A_ = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
A_ = self.type_sequence_label_size
A_ = TFViTForImageClassification(_SCREAMING_SNAKE_CASE )
A_ = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# Test with an image with different size than the one specified in config.
A_ = self.image_size // 2
A_ = pixel_values[:, :, :image_size, :image_size]
A_ = model(_SCREAMING_SNAKE_CASE , interpolate_pos_encoding=_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A_ = 1
A_ = TFViTForImageClassification(_SCREAMING_SNAKE_CASE )
A_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A_ = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __A ( self ) -> str:
A_ = self.prepare_config_and_inputs()
A_ ,A_ ,A_ = config_and_inputs
A_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class __UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowercase : Any = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
__lowercase : List[Any] = (
{'feature-extraction': TFViTModel, 'image-classification': TFViTForImageClassification}
if is_tf_available()
else {}
)
__lowercase : int = False
__lowercase : Tuple = False
__lowercase : Tuple = False
def __A ( self ) -> List[Any]:
A_ = TFViTModelTester(self )
A_ = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def __A ( self ) -> Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def __A ( self ) -> List[str]:
pass
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def __A ( self ) -> Optional[Any]:
pass
def __A ( self ) -> Optional[int]:
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
A_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , tf.keras.layers.Layer ) )
def __A ( self ) -> Tuple:
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(_SCREAMING_SNAKE_CASE )
A_ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ = [*signature.parameters.keys()]
A_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
def __A ( self ) -> List[Any]:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def __A ( self ) -> int:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE )
@slow
def __A ( self ) -> List[str]:
A_ = TFViTModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( ) -> Optional[Any]:
A_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __A ( self ) -> List[Any]:
return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None
@slow
def __A ( self ) -> Optional[Any]:
A_ = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' )
A_ = self.default_image_processor
A_ = prepare_img()
A_ = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors='''tf''' )
# forward pass
A_ = model(**_SCREAMING_SNAKE_CASE )
# verify the logits
A_ = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE )
A_ = tf.constant([-0.2_744, 0.8_215, -0.0_836] )
tf.debugging.assert_near(outputs.logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 )
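# The seq_length bookkeeping in the tester above follows directly from ViT's
# patch embedding: an image of side image_size is cut into patch_size x patch_size
# patches, giving (image_size // patch_size) ** 2 tokens plus one [CLS] token.
# Worked through with the tester defaults (image_size=30, patch_size=2):
image_size, patch_size = 30, 2
num_patches = (image_size // patch_size) ** 2       # 15 * 15 = 225
seq_length = num_patches + 1                        # +1 for [CLS] -> 226
half = image_size // 2                              # the interpolation test crops to 15
print(seq_length, (half // patch_size) ** 2 + 1)    # 226 50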
| 18 |
'''simple docstring'''
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class __UpperCAmelCase :
'''simple docstring'''
pass
| 18 | 1 |
"""simple docstring"""
def snake_case_ ( A_ : int = 10_00 ):
'''simple docstring'''
_lowerCamelCase : List[Any] = 1, 1
_lowerCamelCase : Optional[Any] = []
for i in range(1, n + 1 ):
_lowerCamelCase : int = prev_numerator + 2 * prev_denominator
_lowerCamelCase : Optional[int] = prev_numerator + prev_denominator
if len(str(UpperCAmelCase_ ) ) > len(str(UpperCAmelCase_ ) ):
result.append(UpperCAmelCase_ )
_lowerCamelCase : Union[str, Any] = numerator
_lowerCamelCase : List[str] = denominator
return len(UpperCAmelCase_ )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 72 |
'''simple docstring'''
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class UpperCAmelCase_ (_UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
@property
def lowercase_ ( self ) -> int:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowercase_ ( self ) -> Dict:
__lowerCamelCase : int = ort.SessionOptions()
__lowerCamelCase : Optional[Any] = False
return options
def lowercase_ ( self ) -> Union[str, Any]:
__lowerCamelCase : Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo.png' )
__lowerCamelCase : Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo_mask.png' )
__lowerCamelCase : Any = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'runwayml/stable-diffusion-inpainting' , revision='onnx' , safety_checker=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Tuple = 'A red cat sitting on a park bench'
__lowerCamelCase : Union[str, Any] = np.random.RandomState(0 )
__lowerCamelCase : Union[str, Any] = pipe(
prompt=SCREAMING_SNAKE_CASE_ , image=SCREAMING_SNAKE_CASE_ , mask_image=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , num_inference_steps=10 , generator=SCREAMING_SNAKE_CASE_ , output_type='np' , )
__lowerCamelCase : Optional[Any] = output.images
__lowerCamelCase : Tuple = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
__lowerCamelCase : Union[str, Any] = np.array([0.2_5_1_4, 0.3_0_0_7, 0.3_5_1_7, 0.1_7_9_0, 0.2_3_8_2, 0.3_1_6_7, 0.1_9_4_4, 0.2_2_7_3, 0.2_4_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowercase_ ( self ) -> Optional[Any]:
__lowerCamelCase : Union[str, Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo.png' )
__lowerCamelCase : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo_mask.png' )
__lowerCamelCase : Dict = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-inpainting' , subfolder='scheduler' , revision='onnx' )
__lowerCamelCase : Tuple = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'runwayml/stable-diffusion-inpainting' , revision='onnx' , scheduler=SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : str = 'A red cat sitting on a park bench'
__lowerCamelCase : Tuple = np.random.RandomState(0 )
__lowerCamelCase : Optional[int] = pipe(
prompt=SCREAMING_SNAKE_CASE_ , image=SCREAMING_SNAKE_CASE_ , mask_image=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , num_inference_steps=20 , generator=SCREAMING_SNAKE_CASE_ , output_type='np' , )
__lowerCamelCase : List[str] = output.images
__lowerCamelCase : Dict = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
__lowerCamelCase : Tuple = np.array([0.0_0_8_6, 0.0_0_7_7, 0.0_0_8_3, 0.0_0_9_3, 0.0_1_0_7, 0.0_1_3_9, 0.0_0_9_4, 0.0_0_9_7, 0.0_1_2_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
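# The gpu_provider property above uses onnxruntime's provider-with-options form:
# a (name, options_dict) tuple rather than a bare provider string. A minimal
# sketch of passing it to a session directly; "model.onnx" is a placeholder
# path, so any exported ONNX graph would stand in here.
import onnxruntime as ort

sess_options = ort.SessionOptions()
sess_options.enable_mem_pattern = False
provider = (
    "CUDAExecutionProvider",
    {"gpu_mem_limit": "15000000000", "arena_extend_strategy": "kSameAsRequested"},
)
session = ort.InferenceSession("model.onnx", sess_options=sess_options, providers=[provider])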
| 185 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {
'''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class _lowercase ( __a ):
"""simple docstring"""
lowercase__ = '''vit_msn'''
def __init__( self : Any , UpperCamelCase__ : Tuple=768 , UpperCamelCase__ : Union[str, Any]=12 , UpperCamelCase__ : Any=12 , UpperCamelCase__ : Optional[Any]=3072 , UpperCamelCase__ : List[Any]="gelu" , UpperCamelCase__ : Any=0.0 , UpperCamelCase__ : List[Any]=0.0 , UpperCamelCase__ : Union[str, Any]=0.02 , UpperCamelCase__ : str=1E-06 , UpperCamelCase__ : str=224 , UpperCamelCase__ : List[str]=16 , UpperCamelCase__ : Tuple=3 , UpperCamelCase__ : Dict=True , **UpperCamelCase__ : Optional[Any] , ) -> str:
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
__UpperCamelCase =hidden_size
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =intermediate_size
__UpperCamelCase =hidden_act
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =initializer_range
__UpperCamelCase =layer_norm_eps
__UpperCamelCase =image_size
__UpperCamelCase =patch_size
__UpperCamelCase =num_channels
__UpperCamelCase =qkv_bias
| 371 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
__lowercase = {'''configuration_gpt_neox''': ['''GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXConfig''']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = ['''GPTNeoXTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
'''GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXForCausalLM''',
'''GPTNeoXForQuestionAnswering''',
'''GPTNeoXForSequenceClassification''',
'''GPTNeoXForTokenClassification''',
'''GPTNeoXLayer''',
'''GPTNeoXModel''',
'''GPTNeoXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
__lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
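# The _LazyModule wiring above defers heavy submodule imports until an attribute
# is first touched. The same effect can be sketched with PEP 562's module-level
# __getattr__ (Python 3.7+), independent of the transformers helper; the mapping
# below is an illustrative subset, placed in a package __init__.py.
import importlib

_LAZY = {
    "GPTNeoXConfig": ".configuration_gpt_neox",
    "GPTNeoXModel": ".modeling_gpt_neox",
}

def __getattr__(name):
    if name in _LAZY:
        module = importlib.import_module(_LAZY[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")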
| 85 | 0 |
"""simple docstring"""
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class _UpperCAmelCase( tf.keras.optimizers.schedules.LearningRateSchedule ):
def __init__( self , __a , __a , __a , __a = 1.0 , __a = None , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
_UpperCamelCase = initial_learning_rate
_UpperCamelCase = warmup_steps
_UpperCamelCase = power
_UpperCamelCase = decay_schedule_fn
_UpperCamelCase = name
def __call__( self , __a) -> List[str]:
'''simple docstring'''
with tf.name_scope(self.name or '''WarmUp''') as name:
# Implements polynomial warmup. i.e., if global_step < warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
_UpperCamelCase = tf.cast(__a , tf.floataa)
_UpperCamelCase = tf.cast(self.warmup_steps , tf.floataa)
_UpperCamelCase = global_step_float / warmup_steps_float
_UpperCamelCase = self.initial_learning_rate * tf.math.pow(__a , self.power)
return tf.cond(
global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps) , name=__a , )
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case = 0.0, __snake_case = 0.9, __snake_case = 0.999, __snake_case = 1e-8, __snake_case = None, __snake_case = None, __snake_case = 0.0, __snake_case = 1.0, __snake_case = None, ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=__lowerCAmelCase, decay_steps=num_train_steps - num_warmup_steps, end_learning_rate=init_lr * min_lr_ratio, power=__lowerCAmelCase, )
if num_warmup_steps:
_UpperCamelCase = WarmUp(
initial_learning_rate=__lowerCAmelCase, decay_schedule_fn=__lowerCAmelCase, warmup_steps=__lowerCAmelCase, )
if weight_decay_rate > 0.0:
_UpperCamelCase = AdamWeightDecay(
learning_rate=__lowerCAmelCase, weight_decay_rate=__lowerCAmelCase, beta_a=__lowerCAmelCase, beta_a=__lowerCAmelCase, epsilon=__lowerCAmelCase, clipnorm=__lowerCAmelCase, global_clipnorm=__lowerCAmelCase, exclude_from_weight_decay=['''LayerNorm''', '''layer_norm''', '''bias'''], include_in_weight_decay=__lowerCAmelCase, )
else:
_UpperCamelCase = tf.keras.optimizers.Adam(
learning_rate=__lowerCAmelCase, beta_a=__lowerCAmelCase, beta_a=__lowerCAmelCase, epsilon=__lowerCAmelCase, clipnorm=__lowerCAmelCase, global_clipnorm=__lowerCAmelCase, )
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
class _UpperCAmelCase( snake_case__ ):
def __init__( self , __a = 0.001 , __a = 0.9 , __a = 0.999 , __a = 1e-7 , __a = False , __a = 0.0 , __a = None , __a = None , __a = "AdamWeightDecay" , **__a , ) -> Dict:
'''simple docstring'''
super().__init__(__a , __a , __a , __a , __a , __a , **__a)
_UpperCamelCase = weight_decay_rate
_UpperCamelCase = include_in_weight_decay
_UpperCamelCase = exclude_from_weight_decay
@classmethod
def UpperCAmelCase ( cls , __a) -> Tuple:
'''simple docstring'''
_UpperCamelCase = {'''WarmUp''': WarmUp}
return super(__a , cls).from_config(__a , custom_objects=__a)
def UpperCAmelCase ( self , __a , __a , __a) -> Union[str, Any]:
'''simple docstring'''
super(__a , self)._prepare_local(__a , __a , __a)
_UpperCamelCase = tf.constant(
self.weight_decay_rate , name='''adam_weight_decay_rate''')
def UpperCAmelCase ( self , __a , __a , __a) -> Any:
'''simple docstring'''
_UpperCamelCase = self._do_use_weight_decay(var.name)
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['''weight_decay_rate'''] , use_locking=self._use_locking , )
return tf.no_op()
def UpperCAmelCase ( self , __a , __a=None , **__a) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = list(zip(*__a))
return super(__a , self).apply_gradients(zip(__a , __a) , name=__a , **__a)
def UpperCAmelCase ( self , __a , __a , __a) -> Tuple:
'''simple docstring'''
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
_UpperCamelCase = apply_state or {}
_UpperCamelCase = apply_state.get((var_device, var_dtype))
if coefficients is None:
_UpperCamelCase = self._fallback_apply_state(__a , __a)
_UpperCamelCase = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def UpperCAmelCase ( self , __a , __a , __a=None) -> str:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self._get_lr(var.device , var.dtype.base_dtype , __a)
_UpperCamelCase = self._decay_weights_op(__a , __a , __a)
with tf.control_dependencies([decay]):
return super(__a , self)._resource_apply_dense(__a , __a , **__a)
def UpperCAmelCase ( self , __a , __a , __a , __a=None) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self._get_lr(var.device , var.dtype.base_dtype , __a)
_UpperCamelCase = self._decay_weights_op(__a , __a , __a)
with tf.control_dependencies([decay]):
return super(__a , self)._resource_apply_sparse(__a , __a , __a , **__a)
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCamelCase = super().get_config()
config.update({'''weight_decay_rate''': self.weight_decay_rate})
return config
def UpperCAmelCase ( self , __a) -> Tuple:
'''simple docstring'''
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(__a , __a) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(__a , __a) is not None:
return False
return True
class _UpperCAmelCase( snake_case__ ):
def __init__( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = []
_UpperCamelCase = None
@property
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
if self._accum_steps is None:
_UpperCamelCase = tf.Variable(
tf.constant(0 , dtype=tf.intaa) , trainable=__a , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
return self._accum_steps.value()
@property
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
if not self._gradients:
raise ValueError('''The accumulator should be called first to initialize the gradients''')
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self , __a) -> Dict:
'''simple docstring'''
if not self._gradients:
_UpperCamelCase = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(__a) , trainable=__a , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
if gradient is not None
else gradient
for gradient in gradients
])
if len(__a) != len(self._gradients):
raise ValueError(F'''Expected {len(self._gradients)} gradients, but got {len(__a)}''')
for accum_gradient, gradient in zip(self._gradients , __a):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(__a)
self._accum_steps.assign_add(1)
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
if not self._gradients:
return
self._accum_steps.assign(0)
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(__a))
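# With power=1 the WarmUp schedule above is linear: before warmup_steps the rate
# is init_lr * step / warmup_steps, after which the decay schedule takes over
# (called with step - warmup_steps). A plain-Python sketch of the resulting
# curve, with a linear decay to zero standing in for PolynomialDecay:
def warmup_then_linear_decay(step, init_lr=3e-5, warmup_steps=100, total_steps=1_000):
    if step < warmup_steps:
        return init_lr * step / warmup_steps
    remaining = max(total_steps - step, 0) / (total_steps - warmup_steps)
    return init_lr * remaining

print(warmup_then_linear_decay(50))     # halfway through warmup -> 1.5e-05
print(warmup_then_linear_decay(550))    # halfway through decay  -> 1.5e-05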
| 194 |
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def __A ( )-> tuple[list[int], int]:
"""simple docstring"""
_UpperCAmelCase = [randint(-1_000 , 1_000 ) for i in range(10 )]
_UpperCAmelCase = randint(-5_000 , 5_000 )
return (arr, r)
_a = make_dataset()
def __A ( __lowerCAmelCase , __lowerCAmelCase )-> tuple[int, ...]:
"""simple docstring"""
for triplet in permutations(__lowerCAmelCase , 3 ):
if sum(__lowerCAmelCase ) == target:
return tuple(sorted(__lowerCAmelCase ) )
return (0, 0, 0)
def __A ( __lowerCAmelCase , __lowerCAmelCase )-> tuple[int, int, int]:
"""simple docstring"""
arr.sort()
_UpperCAmelCase = len(__lowerCAmelCase )
for i in range(n - 1 ):
_UpperCAmelCase , _UpperCAmelCase = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
def __A ( )-> tuple[float, float]:
"""simple docstring"""
_UpperCAmelCase = '\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n'
_UpperCAmelCase = '\ntriplet_sum1(*dataset)\n'
_UpperCAmelCase = '\ntriplet_sum2(*dataset)\n'
_UpperCAmelCase = repeat(setup=__lowerCAmelCase , stmt=__lowerCAmelCase , repeat=5 , number=10_000 )
_UpperCAmelCase = repeat(setup=__lowerCAmelCase , stmt=__lowerCAmelCase , repeat=5 , number=10_000 )
return (min(__lowerCAmelCase ), min(__lowerCAmelCase ))
if __name__ == "__main__":
from doctest import testmod
testmod()
_a = solution_times()
print(F'''The time for naive implementation is {times[0]}.''')
print(F'''The time for optimized implementation is {times[1]}.''')
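# The benchmark above contrasts the O(n^3) permutation search with the sorted
# two-pointer scan, which costs O(n^2) after an O(n log n) sort. A self-contained
# restatement of the two-pointer version, checked on a fixed instance:
def three_sum_two_pointer(nums, target):
    nums = sorted(nums)
    for i in range(len(nums) - 2):
        left, right = i + 1, len(nums) - 1
        while left < right:
            s = nums[i] + nums[left] + nums[right]
            if s == target:
                return (nums[i], nums[left], nums[right])
            if s < target:
                left += 1
            else:
                right -= 1
    return (0, 0, 0)

print(three_sum_two_pointer([7, -2, 4, 1, 9, -5], 3))   # (-5, 1, 7)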
| 39 | 0 |
"""simple docstring"""
import copy
import random
from transformers import CLIPTokenizer
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
def __init__( self , *lowercase , **lowercase ):
super().__init__(*lowercase , **lowercase )
_lowerCamelCase : Any = {}
def A_ ( self , lowercase , *lowercase , **lowercase ):
_lowerCamelCase : Optional[int] = super().add_tokens(lowercase , *lowercase , **lowercase )
if num_added_tokens == 0:
raise ValueError(
F'''The tokenizer already contains the token {placeholder_token}. Please pass a different'''
' `placeholder_token` that is not already in the tokenizer.' )
def A_ ( self , lowercase , *lowercase , lowercase=1 , **lowercase ):
_lowerCamelCase : Optional[int] = []
if num_vec_per_token == 1:
self.try_adding_tokens(lowercase , *lowercase , **lowercase )
output.append(lowercase )
else:
_lowerCamelCase : Any = []
for i in range(lowercase ):
_lowerCamelCase : Any = placeholder_token + F'''_{i}'''
self.try_adding_tokens(lowercase , *lowercase , **lowercase )
output.append(lowercase )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
F'''The tokenizer already has placeholder token {token} that can get confused with'''
F''' {placeholder_token}; keep placeholder tokens independent''' )
_lowerCamelCase : Dict = output
def A_ ( self , lowercase , lowercase=False , lowercase=1.0 ):
if isinstance(lowercase , lowercase ):
_lowerCamelCase : List[str] = []
for i in range(len(lowercase ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=lowercase ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
_lowerCamelCase : List[str] = self.token_map[placeholder_token]
_lowerCamelCase : Any = tokens[: 1 + int(len(lowercase ) * prop_tokens_to_load )]
if vector_shuffle:
_lowerCamelCase : List[Any] = copy.copy(lowercase )
random.shuffle(lowercase )
_lowerCamelCase : List[Any] = text.replace(lowercase , ' '.join(lowercase ) )
return text
def __call__( self , lowercase , *lowercase , lowercase=False , lowercase=1.0 , **lowercase ):
return super().__call__(
self.replace_placeholder_tokens_in_text(
lowercase , vector_shuffle=lowercase , prop_tokens_to_load=lowercase ) , *lowercase , **lowercase , )
def A_ ( self , lowercase , *lowercase , lowercase=False , lowercase=1.0 , **lowercase ):
return super().encode(
self.replace_placeholder_tokens_in_text(
lowercase , vector_shuffle=lowercase , prop_tokens_to_load=lowercase ) , *lowercase , **lowercase , )
| 12 |
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = (UnCLIPScheduler,)
def A_ ( self , **lowercase ):
_lowerCamelCase : Any = {
'num_train_timesteps': 1000,
'variance_type': 'fixed_small_log',
'clip_sample': True,
'clip_sample_range': 1.0,
'prediction_type': 'epsilon',
}
config.update(**lowercase )
return config
def A_ ( self ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=lowercase )
def A_ ( self ):
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=lowercase )
def A_ ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowercase )
def A_ ( self ):
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=lowercase )
def A_ ( self ):
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=lowercase )
def A_ ( self ):
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=lowercase , prev_timestep=lowercase )
def A_ ( self ):
_lowerCamelCase : Optional[Any] = self.scheduler_classes[0]
_lowerCamelCase : Optional[int] = self.get_scheduler_config(variance_type='fixed_small_log' )
_lowerCamelCase : str = scheduler_class(**lowercase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_54_96_25 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_99_49_87 ) ) < 1E-5
def A_ ( self ):
_lowerCamelCase : List[str] = self.scheduler_classes[0]
_lowerCamelCase : Optional[Any] = self.get_scheduler_config(variance_type='learned_range' )
_lowerCamelCase : int = scheduler_class(**lowercase )
_lowerCamelCase : List[str] = 0.5
assert scheduler._get_variance(1 , predicted_variance=lowercase ) - -10.1_71_27_90 < 1E-5
assert scheduler._get_variance(487 , predicted_variance=lowercase ) - -5.7_99_80_52 < 1E-5
assert scheduler._get_variance(999 , predicted_variance=lowercase ) - -0.0_01_00_11 < 1E-5
    def test_full_loop( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for i, t in enumerate(timesteps ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 2_52.2_68_24_95 ) < 1E-2
        assert abs(result_mean.item() - 0.3_28_47_43 ) < 1E-3
    def test_full_loop_skip_timesteps( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(25 )
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for i, t in enumerate(timesteps ):
            # 1. predict noise residual
            residual = model(sample , t )
            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual , t , sample , prev_timestep=prev_timestep , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 2_58.2_04_49_83 ) < 1E-2
        assert abs(result_mean.item() - 0.3_36_20_38 ) < 1E-3
def A_ ( self ):
pass
def A_ ( self ):
pass | 12 | 1 |
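# The tests above repeatedly sweep one config field at a time; a self-contained
# sketch of that pattern with a stand-in scheduler class (UnCLIPScheduler and
# the diffusers test harness are assumed in the real test; everything below is
# an illustrative toy).
class _DummyScheduler:
    def __init__(self, num_train_timesteps=1000, variance_type="fixed_small_log"):
        self.num_train_timesteps = num_train_timesteps
        self.variance_type = variance_type

def check_over_configs(**overrides):
    defaults = {"num_train_timesteps": 1000, "variance_type": "fixed_small_log"}
    defaults.update(overrides)
    scheduler = _DummyScheduler(**defaults)
    assert scheduler.variance_type == defaults["variance_type"]

for timesteps in [1, 5, 100, 1000]:
    check_over_configs(num_train_timesteps=timesteps)
for variance in ["fixed_small_log", "learned_range"]:
    check_over_configs(variance_type=variance)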
'''simple docstring'''
a__ : List[Any] = '0.21.0'
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 349 |
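# A short usage sketch for one utility re-exported above. The decorator is the
# public accelerate API (`find_executable_batch_size`); the training body here
# is only a placeholder.
from accelerate.utils import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    # on a CUDA OOM the decorator halves batch_size and calls this again
    print(f"trying batch_size={batch_size}")

train()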
'''simple docstring'''
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class ExtractManager:
    def __init__( self , cache_dir = None ):
        self.extract_dir = (
            os.path.join(cache_dir , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor
    def _get_output_path( self , path ) -> str:
        from .file_utils import hash_url_to_filename
        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
        path = os.path.abspath(path )
        return os.path.join(self.extract_dir , hash_url_to_filename(path ) )
    def _do_extract( self , output_path , force_extract ) -> bool:
        return force_extract or (
            not os.path.isfile(output_path ) and not (os.path.isdir(output_path ) and os.listdir(output_path ))
        )
    def extract( self , input_path , force_extract = False ) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path )
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path )
        if self._do_extract(output_path , force_extract ):
            self.extractor.extract(input_path , output_path , extractor_format )
        return output_path
class BaseExtractor( ABC ):
    @classmethod
    @abstractmethod
    def is_extractable( cls , path , **kwargs ) -> bool:
        ...
    @staticmethod
    @abstractmethod
    def extract( input_path , output_path ) -> None:
        ...
class MagicNumberBaseExtractor( BaseExtractor , ABC ):
    magic_numbers = []
    @staticmethod
    def read_magic_number( path , magic_number_length ) -> bytes:
        with open(path , """rb""" ) as f:
            return f.read(magic_number_length )
    @classmethod
    def is_extractable( cls , path , magic_number = b"" ) -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number ) for cls_magic_number in cls.magic_numbers )
            try:
                magic_number = cls.read_magic_number(path , magic_number_length )
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number ) for cls_magic_number in cls.magic_numbers )
class TarExtractor( BaseExtractor ):
    @classmethod
    def is_extractable( cls , path , **kwargs ) -> bool:
        return tarfile.is_tarfile(path )
    @staticmethod
    def safemembers( members , output_path ):
        def resolved(path ) -> str:
            return os.path.realpath(os.path.abspath(path ) )
        def badpath(path , base ) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base , path ) ).startswith(base )
        def badlink(info , base ) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base , os.path.dirname(info.name ) ) )
            return badpath(info.linkname , base=tip )
        base = resolved(output_path )
        for finfo in members:
            if badpath(finfo.name , base ):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)" )
            elif finfo.issym() and badlink(finfo , base ):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}" )
            elif finfo.islnk() and badlink(finfo , base ):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}" )
            else:
                yield finfo
    @staticmethod
    def extract( input_path , output_path ) -> None:
        os.makedirs(output_path , exist_ok=True )
        tar_file = tarfile.open(input_path )
        tar_file.extractall(output_path , members=TarExtractor.safemembers(tar_file , output_path ) )
        tar_file.close()
class GzipExtractor( MagicNumberBaseExtractor ):
    magic_numbers = [B'''\x1F\x8B''']
    @staticmethod
    def extract( input_path , output_path ) -> None:
        with gzip.open(input_path , """rb""" ) as gzip_file:
            with open(output_path , """wb""" ) as extracted_file:
                shutil.copyfileobj(gzip_file , extracted_file )
class ZipExtractor( MagicNumberBaseExtractor ):
    magic_numbers = [
        B'''PK\x03\x04''',
        B'''PK\x05\x06''', # empty archive
        B'''PK\x07\x08''', # spanned archive
    ]
    @classmethod
    def is_extractable( cls , path , magic_number = b"" ) -> bool:
        if super().is_extractable(path , magic_number=magic_number ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
            with open(path , """rb""" ) as fp:
                endrec = _EndRecData(fp )
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir ) # CD is where we expect it to be
                            if len(data ) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir , data ) # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True # First central directory entry has correct magic number
            return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
    @staticmethod
    def extract( input_path , output_path ) -> None:
        os.makedirs(output_path , exist_ok=True )
        with zipfile.ZipFile(input_path , """r""" ) as zip_file:
            zip_file.extractall(output_path )
            zip_file.close()
class XzExtractor( MagicNumberBaseExtractor ):
    magic_numbers = [B'''\xFD\x37\x7A\x58\x5A\x00''']
    @staticmethod
    def extract( input_path , output_path ) -> None:
        with lzma.open(input_path ) as compressed_file:
            with open(output_path , """wb""" ) as extracted_file:
                shutil.copyfileobj(compressed_file , extracted_file )
class RarExtractor( MagicNumberBaseExtractor ):
    magic_numbers = [B'''Rar!\x1a\x07\x00''', B'''Rar!\x1a\x07\x01\x00'''] # RAR_ID # RAR5_ID
    @staticmethod
    def extract( input_path , output_path ) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("""Please pip install rarfile""" )
        import rarfile
        os.makedirs(output_path , exist_ok=True )
        rf = rarfile.RarFile(input_path )
        rf.extractall(output_path )
        rf.close()
class ZstdExtractor( MagicNumberBaseExtractor ):
    magic_numbers = [B'''\x28\xb5\x2F\xFD''']
    @staticmethod
    def extract( input_path , output_path ) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("""Please pip install zstandard""" )
        import zstandard as zstd
        dctx = zstd.ZstdDecompressor()
        with open(input_path , """rb""" ) as ifh, open(output_path , """wb""" ) as ofh:
            dctx.copy_stream(ifh , ofh )
class Bzip2Extractor( MagicNumberBaseExtractor ):
    magic_numbers = [B'''\x42\x5A\x68''']
    @staticmethod
    def extract( input_path , output_path ) -> None:
        with bz2.open(input_path , """rb""" ) as compressed_file:
            with open(output_path , """wb""" ) as extracted_file:
                shutil.copyfileobj(compressed_file , extracted_file )
class SevenZipExtractor( MagicNumberBaseExtractor ):
    magic_numbers = [B'''\x37\x7A\xBC\xAF\x27\x1C''']
    @staticmethod
    def extract( input_path , output_path ) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("""Please pip install py7zr""" )
        import py7zr
        os.makedirs(output_path , exist_ok=True )
        with py7zr.SevenZipFile(input_path , """r""" ) as archive:
            archive.extractall(output_path )
class Lz4Extractor( MagicNumberBaseExtractor ):
    magic_numbers = [B'''\x04\x22\x4D\x18''']
    @staticmethod
    def extract( input_path , output_path ) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("""Please pip install lz4""" )
        import lz4.frame
        with lz4.frame.open(input_path , """rb""" ) as compressed_file:
            with open(output_path , """wb""" ) as extracted_file:
                shutil.copyfileobj(compressed_file , extracted_file )
class Extractor:
    # Put zip file last, because tar or gzip files can be wrongly detected as zip
    extractors = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor, # <Added version="2.4.0"/>
        "lz4": Lz4Extractor, # <Added version="2.4.0"/>
    }
    @classmethod
    def _get_magic_number_max_length( cls ):
        return max(
            len(extractor_magic_number )
            for extractor in cls.extractors.values()
            if issubclass(extractor , MagicNumberBaseExtractor )
            for extractor_magic_number in extractor.magic_numbers )
    @staticmethod
    def _read_magic_number( path , magic_number_length ) -> bytes:
        try:
            return MagicNumberBaseExtractor.read_magic_number(path , magic_number_length=magic_number_length )
        except OSError:
            return b""
    @classmethod
    def is_extractable( cls , path , return_extractor = False ) -> bool:
        warnings.warn(
            """Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
            """Use 'infer_extractor_format' instead.""" , category=FutureWarning , )
        extractor_format = cls.infer_extractor_format(path )
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)
    @classmethod
    def infer_extractor_format( cls , path ) -> str: # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path , magic_number_max_length )
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path , magic_number=magic_number ):
return extractor_format
    @classmethod
    def extract( cls , input_path , output_path , extractor_format = None , extractor = "deprecated" , ) -> None:
        os.makedirs(os.path.dirname(output_path ) , exist_ok=True )
        # Prevent parallel extractions
        lock_path = str(Path(output_path ).with_suffix(""".lock""" ) )
        with FileLock(lock_path ):
            shutil.rmtree(output_path , ignore_errors=True )
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format , str ): # passed as positional arg
                    warnings.warn(
                        """Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
                        """Use 'extractor_format' instead.""" , category=FutureWarning , )
                    extractor = extractor if extractor != """deprecated""" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path , output_path )
            else:
                warnings.warn(
                    """Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an """
                    """exception in 3.0.0.""" , category=FutureWarning , )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path ):
                        return extractor.extract(input_path , output_path )
| 349 | 1 |
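# Magic-number sniffing as used by the extractors above, reduced to a
# standalone function; the mapping below is a small illustrative subset.
import gzip

MAGIC = {b"\x1f\x8b": "gzip", b"BZh": "bz2", b"PK\x03\x04": "zip", b"\xfd7zXZ\x00": "xz"}

def sniff(path, max_len=6):
    with open(path, "rb") as f:
        head = f.read(max_len)
    for magic, fmt in MAGIC.items():
        if head.startswith(magic):
            return fmt
    return None

with gzip.open("/tmp/example.gz", "wb") as f:
    f.write(b"hello")
print(sniff("/tmp/example.gz"))  # gzip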
import json
import os
import unittest
from typing import Tuple
from transformers import Wav2Vec2PhonemeCTCTokenizer
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.models.wav2vec2_phoneme.tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class Wav2Vec2PhonemeCTCTokenizerTest( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = Wav2Vec2PhonemeCTCTokenizer
    test_rust_tokenizer = False
    def setUp( self ):
        """simple docstring"""
        super().setUp()
        vocab = (
'<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '
'ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '
'ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '
'oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '
'pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '
'yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '
'əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '
'ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '
'ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '
'uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '
'ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '
'ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '
'ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'
).split(' ' )
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        self.special_tokens_map = {'pad_token': '<pad>', 'unk_token': '<unk>', 'bos_token': '<s>', 'eos_token': '</s>'}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
    def get_clean_sequence( self , tokenizer , with_prefix_space=False , max_length=20 , min_length=5 ):
        """simple docstring"""
        toks = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=False )) for i in range(len(tokenizer ) )]
        toks = list(filter(lambda t : [t[0]] == tokenizer.encode(t[1] , do_phonemize=False ) , toks ) )
        if max_length is not None and len(toks ) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks ) < min_length and len(toks ) > 0:
            while len(toks ) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids , clean_up_tokenization_spaces=False )
        if " " not in output_txt and len(toks_ids ) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=False )
                + ' '
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=False )
            )
        if with_prefix_space:
            output_txt = ' ' + output_txt
        output_ids = tokenizer.encode(output_txt , add_special_tokens=False )
        return output_txt, output_ids
    def get_tokenizer( self , **kwargs ):
        """simple docstring"""
        kwargs.update(self.special_tokens_map )
        return Wav2Vec2PhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **kwargs )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : List[Any] =self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
# check adding a single token
tokenizer.add_tokens('xxx' )
__UpperCamelCase : int =tokenizer('m xxx ɪ' , do_phonemize=lowerCamelCase__ ).input_ids
self.assertEqual(lowerCamelCase__ , [13, 392, 17] ) # xxx should be last token
tokenizer.add_tokens(['aaa', 'bbb', 'ccc'] )
__UpperCamelCase : Optional[int] =tokenizer('m aaa ɪ ccc' , do_phonemize=lowerCamelCase__ ).input_ids
self.assertEqual(lowerCamelCase__ , [13, 393, 17, 395] ) # aaa and ccc should be after xxx and 2 after aaa
__UpperCamelCase : Tuple =tokenizer('maɪ c' , do_phonemize=lowerCamelCase__ ).input_ids
self.assertEqual(lowerCamelCase__ , [3, 200] ) # mai should be <unk> (=3)
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Dict =self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
__UpperCamelCase : Optional[int] ='Hello how are you'
__UpperCamelCase : Any =tokenizer.phonemize(lowerCamelCase__ , phonemizer_lang='en-us' )
self.assertEqual(lowerCamelCase__ , 'h ə l oʊ h aʊ ɑːɹ j uː' )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : str =self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
__UpperCamelCase : List[Any] ='Hello how are you'
__UpperCamelCase : str =tokenizer.phonemize(lowerCamelCase__ , phonemizer_lang='en-us' )
self.assertEqual(tokenizer(lowerCamelCase__ ).input_ids , tokenizer(lowerCamelCase__ , do_phonemize=lowerCamelCase__ ).input_ids )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : List[str] =self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
__UpperCamelCase : Tuple ='Hello how are you'
__UpperCamelCase : Optional[int] =tokenizer.phonemize(lowerCamelCase__ , phonemizer_lang='en-us' )
__UpperCamelCase : List[Any] =tokenizer.decode(tokenizer(lowerCamelCase__ ).input_ids )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Union[str, Any] =self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
__UpperCamelCase : int =[
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
__UpperCamelCase : List[Any] =tokenizer.decode(sample_ids[0] )
__UpperCamelCase : int =tokenizer.batch_decode(lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , batch_tokens[0] )
self.assertEqual(lowerCamelCase__ , ['k s ɾ ɾ l ɭʲ', 'j ð s j ð s oːɹ'] )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : List[str] =self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
__UpperCamelCase : List[str] ='Hello how are you'
__UpperCamelCase : Union[str, Any] =tokenizer.phonemize(lowerCamelCase__ , phonemizer_lang='en-us' )
self.assertEqual(lowerCamelCase__ , 'h ə l oʊ | h aʊ | ɑːɹ | j uː |' )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Optional[int] =self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
__UpperCamelCase : Optional[Any] ='Hello how are you'
__UpperCamelCase : Any =tokenizer.phonemize(lowerCamelCase__ , phonemizer_lang='en-us' )
self.assertEqual(tokenizer(lowerCamelCase__ ).input_ids , tokenizer(lowerCamelCase__ , do_phonemize=lowerCamelCase__ ).input_ids )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : List[Any] =self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
# fmt: off
__UpperCamelCase : str =[
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
__UpperCamelCase : Optional[int] =tokenizer.decode(sample_ids[0] )
__UpperCamelCase : int =tokenizer.batch_decode(lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , batch_tokens[0] )
self.assertEqual(lowerCamelCase__ , ['k s ɾ ɾ l ɭʲ', 'j ð s j ð s oːɹ'] )
# decode with no word_del_token filter
__UpperCamelCase : Tuple =tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=lowerCamelCase__ )
__UpperCamelCase : int =tokenizer.batch_decode(lowerCamelCase__ , filter_word_delimiter_token=lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , batch_tokens[0] )
self.assertEqual(lowerCamelCase__ , ['k s ɾ | ɾ l | ɭʲ', '| j ð | s j ð s oːɹ'] )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] =self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
__UpperCamelCase : str ='Hello how are you'
__UpperCamelCase : str =tokenizer.phonemize(lowerCamelCase__ , phonemizer_lang='en-us' )
__UpperCamelCase : Dict =tokenizer.decode(tokenizer(lowerCamelCase__ ).input_ids , filter_word_delimiter_token=lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : List[Any] =self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
__UpperCamelCase : str ='Hello how are you'
__UpperCamelCase : str =tokenizer.phonemize(lowerCamelCase__ , phonemizer_lang='en-us' )
__UpperCamelCase : Dict =tokenizer.decode(tokenizer(lowerCamelCase__ ).input_ids , filter_word_delimiter_token=lowerCamelCase__ )
self.assertEqual(' '.join([p.strip() for p in phonemes.split(' |' )] ).strip() , lowerCamelCase__ )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Dict =self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token=lowerCamelCase__ )
__UpperCamelCase : Optional[Any] ='Hello how are you'
__UpperCamelCase : Union[str, Any] =tokenizer(lowerCamelCase__ , phonemizer_lang='en-us' ).input_ids
__UpperCamelCase : str =tokenizer(lowerCamelCase__ , phonemizer_lang='fr-fr' ).input_ids
self.assertNotEqual(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : List[Any] =tokenizer.decode(lowerCamelCase__ )
__UpperCamelCase : Union[str, Any] =tokenizer.decode(lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , 'h ə l oʊ h aʊ ɑːɹ j uː' )
self.assertEqual(lowerCamelCase__ , 'ɛ l o h aʊ a ʁ j u' )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Union[str, Any] =self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
__UpperCamelCase : Tuple ='Hello how Are you'
__UpperCamelCase : str ='hello how are you'
__UpperCamelCase : Union[str, Any] =tokenizer(lowerCamelCase__ ).input_ids
__UpperCamelCase : Tuple =tokenizer(lowerCamelCase__ ).input_ids
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Union[str, Any] =self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
tokenizer.add_tokens(['!', '?'] )
tokenizer.add_special_tokens({'cls_token': '$$$'} )
# fmt: off
__UpperCamelCase : List[str] =[
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
]
# fmt: on
__UpperCamelCase : Dict =tokenizer.batch_decode(lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , ['k s ɾ ɾ l ɭʲ!?!? $$$', 'j ð s j ð s oːɹ $$$'] )
@staticmethod
    def get_from_offsets( offsets , key ):
        """simple docstring"""
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
def __lowercase ( self ):
"""simple docstring"""
        tokenizer = self.get_tokenizer(word_delimiter_token='|' )
        tokenizer.add_tokens('|' )
        # fmt: off
        # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ
        sample_ids = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
        # fmt: on
        outputs = tokenizer.decode(sample_ids , output_char_offsets=True , filter_word_delimiter_token=False )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue('text' in outputs )
self.assertTrue('char_offsets' in outputs )
self.assertTrue(isinstance(lowerCamelCase__ , lowerCamelCase__ ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(' '.join(self.get_from_offsets(outputs['char_offsets'] , 'char' ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs['char_offsets'] , 'char' ) , ['k', 's', 'ɾ', 'ɾ', '|', 'ɾ', 'l', '|', 'ɭʲ'] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs['char_offsets'] , 'start_offset' ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs['char_offsets'] , 'end_offset' ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def __lowercase ( self ):
"""simple docstring"""
        tokenizer = self.get_tokenizer(word_delimiter_token='|' )
        def check_list_tuples_equal(outputs_batch , outputs_list ):
            self.assertTrue(isinstance(outputs_batch , Wav2Vec2PhonemeCTCTokenizerOutput ) )
            self.assertTrue(isinstance(outputs_list[0] , Wav2Vec2PhonemeCTCTokenizerOutput ) )
            # transform list to ModelOutput
            outputs_batch_a = Wav2Vec2PhonemeCTCTokenizerOutput(
                {k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
            self.assertListEqual(outputs_batch['text'] , outputs_batch_a['text'] )
            def recursive_check(list_or_dict_a , list_or_dict_b ):
                if isinstance(list_or_dict_a , list ):
                    [recursive_check(la , lb ) for la, lb in zip(list_or_dict_a , list_or_dict_b )]
                self.assertEqual(list_or_dict_a , list_or_dict_b )
            if "char_offsets" in outputs_batch:
                recursive_check(outputs_batch['char_offsets'] , outputs_batch_a['char_offsets'] )
# fmt: off
        sample_ids = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
        outputs_char_batch = tokenizer.batch_decode(sample_ids , output_char_offsets=True )
        outputs_char = [tokenizer.decode(ids , output_char_offsets=True ) for ids in sample_ids]
        check_list_tuples_equal(outputs_char_batch , outputs_char )
@unittest.skip('Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes' )
def __lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip('Wav2Vec2PhonemeTokenizer always puts spaces between phonemes' )
def __lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip('encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency' )
def __lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip('Wav2Vec2PhonemeModel has no max model length => no testing' )
def __lowercase ( self ):
"""simple docstring"""
pass
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : int =self.get_tokenizers(do_lower_case=lowerCamelCase__ )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
__UpperCamelCase : Any =tokenizer.vocab_size
__UpperCamelCase : List[Any] =len(lowerCamelCase__ )
self.assertNotEqual(lowerCamelCase__ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
__UpperCamelCase : Any =['aaaaa bbbbbb', 'cccccccccdddddddd']
__UpperCamelCase : Optional[Any] =tokenizer.add_tokens(lowerCamelCase__ )
__UpperCamelCase : Any =tokenizer.vocab_size
__UpperCamelCase : List[Any] =len(lowerCamelCase__ )
self.assertNotEqual(lowerCamelCase__ , 0 )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , len(lowerCamelCase__ ) )
self.assertEqual(lowerCamelCase__ , all_size + len(lowerCamelCase__ ) )
__UpperCamelCase : Any =tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' , add_special_tokens=lowerCamelCase__ )
self.assertGreaterEqual(len(lowerCamelCase__ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
__UpperCamelCase : Any ={'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
__UpperCamelCase : Optional[Any] =tokenizer.add_special_tokens(lowerCamelCase__ )
__UpperCamelCase : Any =tokenizer.vocab_size
__UpperCamelCase : int =len(lowerCamelCase__ )
self.assertNotEqual(lowerCamelCase__ , 0 )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , len(lowerCamelCase__ ) )
self.assertEqual(lowerCamelCase__ , all_size_a + len(lowerCamelCase__ ) )
__UpperCamelCase : Dict =tokenizer.encode(
'>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' , add_special_tokens=lowerCamelCase__ )
self.assertGreaterEqual(len(lowerCamelCase__ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip('The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.' )
def __lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip('The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.' )
def __lowercase ( self ):
"""simple docstring"""
pass
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Dict =self.get_tokenizers(fast=lowerCamelCase__ , do_lower_case=lowerCamelCase__ )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
__UpperCamelCase : int =['ð', 'ɪ', 's', 'ɪ', 'z', 'ɐ', 't', 'ɛ', 'k', 's', 't']
__UpperCamelCase : List[Any] =tokenizer.convert_tokens_to_string(lowerCamelCase__ )
self.assertIsInstance(output['text'] , lowerCamelCase__ )
| 245 |
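# The static `get_from_offsets` helper above just projects one key out of a
# list of offset dicts; shown standalone with toy data.
def get_from_offsets(offsets, key):
    return [d[key] for d in offsets]

offsets = [{"char": "k", "start_offset": 0, "end_offset": 1},
           {"char": "s", "start_offset": 1, "end_offset": 4}]
print(get_from_offsets(offsets, "char"))          # ['k', 's']
print(get_from_offsets(offsets, "start_offset"))  # [0, 1]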
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/xlm-roberta-xl''': '''https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json''',
'''facebook/xlm-roberta-xxl''': '''https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json''',
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = """xlm-roberta-xl"""
    def __init__( self , vocab_size=250880 , hidden_size=2560 , num_hidden_layers=36 , num_attention_heads=32 , intermediate_size=10240 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=514 , type_vocab_size=1 , initializer_range=0.02 , layer_norm_eps=1E-05 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaXLOnnxConfig( OnnxConfig ):
    """simple docstring"""
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 245 | 1 |
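# The OnnxConfig above only varies its dynamic axes with the task; the two
# possible mappings, shown directly (the "default" task string is illustrative).
from collections import OrderedDict

def onnx_inputs(task):
    axes = ({0: "batch", 1: "choice", 2: "sequence"} if task == "multiple-choice"
            else {0: "batch", 1: "sequence"})
    return OrderedDict([("input_ids", axes), ("attention_mask", axes)])

print(onnx_inputs("multiple-choice"))
print(onnx_inputs("default"))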
'''simple docstring'''
def solution(length: int = 50 ) -> int:
    """simple docstring"""
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 2 |
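# Sanity check for the DP above. For a single tile length k, rows of length n
# built from grey unit squares plus at least one coloured k-tile satisfy
# g(n) = g(n - 1) + g(n - k) with g(n) = 1 for n < k, and the count is g(n) - 1;
# the solution appears to sum this over k = 2, 3, 4 (Project Euler 116-style).
def one_colour(n, k):
    g = [1] * (n + 1)
    for i in range(k, n + 1):
        g[i] = g[i - 1] + g[i - k]
    return g[n] - 1

print(sum(one_colour(5, k) for k in (2, 3, 4)))   # 12, the classic length-5 count
print(sum(one_colour(50, k) for k in (2, 3, 4)) == solution())  # should hold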
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 5 | 0 |
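# The lazy-import pattern above in miniature: a toy stand-in (not the real
# transformers._LazyModule) that imports a module only on first attribute access.
import importlib

class LazyLoader:
    def __init__(self, import_structure):
        self._origin = {name: mod for mod, names in import_structure.items() for name in names}
    def __getattr__(self, name):
        if name in self._origin:
            return getattr(importlib.import_module(self._origin[name]), name)
        raise AttributeError(name)

onnx_like = LazyLoader({"json": ["dumps"]})
print(onnx_like.dumps({"ok": True}))  # 'json' is imported here, not before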
"""simple docstring"""
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    def __init__( self ):
        '''simple docstring'''
        self.connections = {}
    def add_node( self , node ):
        '''simple docstring'''
        self.connections[node] = {}
    def add_transition_probability( self , node_a , node_b , probability ):
        '''simple docstring'''
        if node_a not in self.connections:
            self.add_node(node_a )
        if node_b not in self.connections:
            self.add_node(node_b )
        self.connections[node_a][node_b] = probability
    def get_nodes( self ):
        '''simple docstring'''
        return list(self.connections )
    def transition( self , node ):
        '''simple docstring'''
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""
def get_transitions(start: str , transitions: list[tuple[str, str, float]] , steps: int ) -> Counter:
    graph = MarkovChainGraphUndirectedUnweighted()
    for node_a, node_b, probability in transitions:
        graph.add_transition_probability(node_a , node_b , probability )
    visited = Counter(graph.get_nodes() )
    node = start
    for _ in range(steps ):
        node = graph.transition(node )
        visited[node] += 1
    return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
| 350 |
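# Usage of the chain above: two states with a heavy self-loop on "a"; over a
# long walk roughly 5/6 of the visits should land on "a".
transitions = [("a", "a", 0.9), ("a", "b", 0.1),
               ("b", "a", 0.5), ("b", "b", 0.5)]
visits = get_transitions("a", transitions, 10_000)
print(visits)  # e.g. Counter({'a': ~8300, 'b': ~1700})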
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline( DiffusionPipeline ):
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler
    def __init__( self , unet: UNet2DModel , scheduler: ScoreSdeVeScheduler ):
        '''simple docstring'''
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
    @torch.no_grad()
    def __call__( self , batch_size: int = 1 , num_inference_steps: int = 2_0_0_0 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , **kwargs , ):
        '''simple docstring'''
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        sample = randn_tensor(shape , generator=generator ) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device )
        self.scheduler.set_timesteps(num_inference_steps )
        self.scheduler.set_sigmas(num_inference_steps )
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
            # correction step
            for _ in range(self.scheduler.config.correct_steps ):
                model_output = self.unet(sample , sigma_t ).sample
                sample = self.scheduler.step_correct(model_output , sample , generator=generator ).prev_sample
            # prediction step
            model_output = model(sample , sigma_t ).sample
            output = self.scheduler.step_pred(model_output , t , sample , generator=generator )
            sample , sample_mean = output.prev_sample, output.prev_sample_mean
        sample = sample_mean.clamp(0 , 1 )
        sample = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample )
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample )
| 172 | 0 |
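# A toy predictor-corrector loop mirroring the pipeline above, with the score
# network replaced by a zero function so it runs without diffusers weights;
# the step sizes here are arbitrary illustration values.
import torch

sample = torch.randn(1, 3, 8, 8)
for t in torch.linspace(1.0, 1e-3, steps=10):
    for _ in range(2):  # corrector: Langevin steps at a fixed noise level
        score = torch.zeros_like(sample)  # stand-in for unet(sample, t)
        sample = sample + 0.01 * score + (2 * 0.01) ** 0.5 * torch.randn_like(sample)
    score = torch.zeros_like(sample)  # predictor: reverse-SDE step
    sample = sample + 0.1 * score
print(sample.shape)  # torch.Size([1, 3, 8, 8])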
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders( accelerator , batch_size = 16 ):
'''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased" )
    datasets = load_dataset("glue" , "mrpc" )
    def tokenize_function(examples ):
# max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=True , max_length=None )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=["idx", "sentence1", "sentence2"] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label" , "labels" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding="longest" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="pt" , )
# Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
a : Any = mocked_dataloaders # noqa: F811
def training_function( config , args ):
'''simple docstring'''
    if os.environ.get("TESTING_MOCKED_DATALOADERS" , None ) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"] )
    seed = int(config["seed"] )
    batch_size = int(config["batch_size"] )
    metric = evaluate.load("glue" , "mrpc" )
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    set_seed(seed )
    train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=True )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
# Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions , references = accelerator.gather((predictions, batch["labels"]) )
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader ) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                    references = references[: len(eval_dataloader.dataset ) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F"epoch {epoch}:" , eval_metric )
def main( ):
    '''simple docstring'''
    parser = argparse.ArgumentParser(description="Simple example of training script." )
    parser.add_argument(
        "--mixed_precision" , type=str , default=None , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU." , )
    parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
| 311 |
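# The last-batch truncation above, in isolation: an all-gather pads the final
# batch so every process holds the same count, and those padded duplicates must
# be dropped before computing metrics.
import torch

def truncate_gathered(gathered, dataset_len, samples_seen):
    return gathered[: dataset_len - samples_seen]

gathered_preds = torch.arange(12)  # pretend 12 gathered, only 10 examples remain
print(truncate_gathered(gathered_preds, dataset_len=10, samples_seen=0))
# tensor([0, 1, ..., 9]) -- the 2 padded duplicates are gone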
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_chinese_clip": [
"CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ChineseCLIPConfig",
"ChineseCLIPOnnxConfig",
"ChineseCLIPTextConfig",
"ChineseCLIPVisionConfig",
],
"processing_chinese_clip": ["ChineseCLIPProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_chinese_clip"] = [
"CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ChineseCLIPModel",
"ChineseCLIPPreTrainedModel",
"ChineseCLIPTextModel",
"ChineseCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
lowercase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 42 | 0 |
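# The try/except OptionalDependencyNotAvailable guards above, reduced to their
# core idea: probe for a backend and register extra symbols only if it imports
# ("json" below stands in for the real torch/vision availability checks).
import importlib.util

def is_available(pkg):
    return importlib.util.find_spec(pkg) is not None

extras = []
if is_available("json"):  # stand-in for is_torch_available()
    extras += ["ChineseCLIPModel"]
print(extras)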
"""simple docstring"""
import os
import string
import sys
__A : Any = 1 << 8
__A : Tuple = {
'tab': ord('''\t'''),
'newline': ord('''\r'''),
'esc': 27,
'up': 65 + ARROW_KEY_FLAG,
'down': 66 + ARROW_KEY_FLAG,
'right': 67 + ARROW_KEY_FLAG,
'left': 68 + ARROW_KEY_FLAG,
'mod_int': 91,
'undefined': sys.maxsize,
'interrupt': 3,
'insert': 50,
'delete': 51,
'pg_up': 53,
'pg_down': 54,
}
KEYMAP['arrow_begin'] = KEYMAP['up']
KEYMAP['arrow_end'] = KEYMAP['left']
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
b'\xe0H': KEYMAP['up'] - ARROW_KEY_FLAG,
b'\x00H': KEYMAP['up'] - ARROW_KEY_FLAG,
b'\xe0P': KEYMAP['down'] - ARROW_KEY_FLAG,
b'\x00P': KEYMAP['down'] - ARROW_KEY_FLAG,
b'\xe0M': KEYMAP['right'] - ARROW_KEY_FLAG,
b'\x00M': KEYMAP['right'] - ARROW_KEY_FLAG,
b'\xe0K': KEYMAP['left'] - ARROW_KEY_FLAG,
b'\x00K': KEYMAP['left'] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars() -> str:
    '''simple docstring'''
    if os.name == "nt":
        import msvcrt
        encoding = 'mbcs'
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER ) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha] )
                    WIN_CH_BUFFER.append(chr(KEYMAP['mod_int'] ) )
                    WIN_CH_BUFFER.append(chx )
                    if ord(chx ) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126 ) )
                    ch = chr(KEYMAP['esc'] )
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding )
        else:
            ch = WIN_CH_BUFFER.pop(0 )
    elif os.name == "posix":
        import termios
        import tty
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd )
        try:
            tty.setraw(fd )
            ch = sys.stdin.read(1 )
        finally:
            termios.tcsetattr(fd , termios.TCSADRAIN , old_settings )
    return ch
def get_character() -> str:
    '''simple docstring'''
    char = get_raw_chars()
    if ord(char ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char ) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo ) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key ) + ARROW_KEY_FLAG )
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 359 |
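# How the ARROW_KEY_FLAG trick above works: arrow codes are shifted into a
# range no single byte occupies, so they can never collide with printable keys.
ARROW_KEY_FLAG = 1 << 8
up = chr(65 + ARROW_KEY_FLAG)                # what get_character() yields for Up
print(ord(up) - ARROW_KEY_FLAG == ord("A"))  # True: recover the raw byte
print(ord(up) > 255)                         # True: outside any byte value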
def hamming_distance(string_a: str , string_b: str ) -> int:
    '''simple docstring'''
    if len(string_a ) != len(string_b ):
        raise ValueError('String lengths must match!' )
    count = 0
    for char_a, char_b in zip(string_a , string_b ):
        if char_a != char_b:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 323 | 0 |
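# Quick check of the helper above.
print(hamming_distance("karolin", "kathrin"))  # 3
print(hamming_distance("0000", "0000"))        # 0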
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
A__ : Tuple = logging.get_logger(__name__)
class DeformableDetrFeatureExtractor( DeformableDetrImageProcessor ):
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 144 |
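# The deprecation shim above as a generic, self-contained pattern: subclass the
# replacement and warn on construction (all names below are made up).
import warnings

class NewProcessor:
    def __init__(self, size=224):
        self.size = size

class OldFeatureExtractor(NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn("OldFeatureExtractor is deprecated; use NewProcessor.", FutureWarning)
        super().__init__(*args, **kwargs)

OldFeatureExtractor()  # warns, then behaves exactly like NewProcessor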
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_speech_encoder_decoder'] = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_speech_encoder_decoder'] = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 11 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)
DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class lowercase_ ( lowercase ):
'''simple docstring'''
__snake_case = '''dpt'''
def __init__( self : List[str] , __UpperCAmelCase : Optional[Any]=768 , __UpperCAmelCase : Tuple=12 , __UpperCAmelCase : Any=12 , __UpperCAmelCase : Union[str, Any]=3_072 , __UpperCAmelCase : int="gelu" , __UpperCAmelCase : Union[str, Any]=0.0 , __UpperCAmelCase : Optional[int]=0.0 , __UpperCAmelCase : Optional[int]=0.02 , __UpperCAmelCase : Dict=1e-1_2 , __UpperCAmelCase : int=384 , __UpperCAmelCase : Union[str, Any]=16 , __UpperCAmelCase : Dict=3 , __UpperCAmelCase : List[str]=False , __UpperCAmelCase : List[Any]=True , __UpperCAmelCase : List[str]=[2, 5, 8, 11] , __UpperCAmelCase : List[Any]="project" , __UpperCAmelCase : Optional[int]=[4, 2, 1, 0.5] , __UpperCAmelCase : Union[str, Any]=[96, 192, 384, 768] , __UpperCAmelCase : List[Any]=256 , __UpperCAmelCase : Dict=-1 , __UpperCAmelCase : str=False , __UpperCAmelCase : Tuple=True , __UpperCAmelCase : int=0.4 , __UpperCAmelCase : Union[str, Any]=255 , __UpperCAmelCase : Dict=0.1 , __UpperCAmelCase : Optional[int]=[1, 1_024, 24, 24] , __UpperCAmelCase : Dict=[0, 1] , __UpperCAmelCase : Optional[Any]=None , **__UpperCAmelCase : List[str] , ) ->Tuple:
"""simple docstring"""
super().__init__(**__UpperCAmelCase )
a = hidden_size
a = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info('''Initializing the config with a `BiT` backbone.''' )
a = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
}
a = BitConfig(**__UpperCAmelCase )
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
logger.info('''Initializing the config with a `BiT` backbone.''' )
a = BitConfig(**__UpperCAmelCase )
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
a = backbone_config
else:
raise ValueError(
F"""backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.""" )
a = backbone_featmap_shape
a = neck_ignore_stages
if readout_type != "project":
raise ValueError('''Readout type must be \'project\' when using `DPT-hybrid` mode.''' )
else:
a = None
a = None
a = []
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = initializer_range
a = layer_norm_eps
a = image_size
a = patch_size
a = num_channels
a = qkv_bias
a = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError('''Readout_type must be one of [\'ignore\', \'add\', \'project\']''' )
a = readout_type
a = reassemble_factors
a = neck_hidden_sizes
a = fusion_hidden_size
a = head_in_index
a = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
a = use_auxiliary_head
a = auxiliary_loss_weight
a = semantic_loss_ignore_index
a = semantic_classifier_dropout
def __lowerCAmelCase ( self : Any ) ->Any:
"""simple docstring"""
a = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
a = self.backbone_config.to_dict()
a = self.__class__.model_type
return output
| 26 |
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)

import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
UpperCAmelCase__ = "bart"
UpperCAmelCase__ = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained('yjernite/retribert-base-uncased')
        qar_model = AutoModel.from_pretrained('yjernite/retribert-base-uncased').to('cuda:0')
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained('yjernite/bart_eli5')
        sas_model = AutoModelForSeq2SeqLM.from_pretrained('yjernite/bart_eli5').to('cuda:0')
        save_dict = torch.load('seq2seq_models/eli5_bart_model_blm_2.pth')
        sas_model.load_state_dict(save_dict['model'])
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_s2s_model(
            model_name='t5-small', from_file='seq2seq_models/eli5_t5_model_1024_4.pth', device='cuda:0')
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path='wiki_snippets', name='wiki40b_en_100_0')['train']
        wiki40b_passage_reps = np.memmap(
            'wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat', dtype='float32', mode='r', shape=(wiki40b_passages.num_rows, 128), )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{'host': 'localhost', 'port': '9200'}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset('eli5', name='LFQA_reddit')
    eli5_train = eli5['train_eli5']
    eli5_train_q_reps = np.memmap(
        'eli5_questions_reps.dat', dtype='float32', mode='r', shape=(eli5_train.num_rows, 128))
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)
wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)  # noqa: E741
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
def _a ( a :str , a :Any="wiki40b" , a :int="dense" , a :Union[str, Any]=10 ) -> List[str]:
if source == "none":
a , a = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
a , a = query_qa_dense_index(
a , a , a , a , a , a )
else:
a , a = query_es_index(
a , a , index_name='''english_wiki40b_snippets_100w''' , n_results=a , )
a = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
a = '''question: {} context: {}'''.format(a , a )
return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    } )
def answer_question(question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc, sas_model, sas_tokenizer, num_answers=1, num_beams=n_beams, min_len=min_len, max_len=max_len, do_sample=sampling, temp=temp, top_p=top_p, top_k=None, max_input_length=1_024, device='cuda:0', )[0]
    return (answer, support_list)
st.title("Long Form Question Answering with ELI5")
# Start sidebar
UpperCAmelCase__ = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
UpperCAmelCase__ = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
UpperCAmelCase__ = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
"Answer the question",
"View the retrieved document only",
"View the most similar ELI5 question and answer",
"Show me everything, please!",
]
UpperCAmelCase__ = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True
UpperCAmelCase__ = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
UpperCAmelCase__ = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
st.sidebar.markdown(retriever_info)
UpperCAmelCase__ = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
UpperCAmelCase__ = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
UpperCAmelCase__ = "wiki40b"
UpperCAmelCase__ = "dense"
UpperCAmelCase__ = "beam"
UpperCAmelCase__ = 2
UpperCAmelCase__ = 64
UpperCAmelCase__ = 256
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = st.sidebar.checkbox("Generation options")
if generate_options:
UpperCAmelCase__ = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
st.sidebar.markdown(generate_info)
UpperCAmelCase__ = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
UpperCAmelCase__ = st.sidebar.slider(
"Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
UpperCAmelCase__ = st.sidebar.slider(
"Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
UpperCAmelCase__ = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
UpperCAmelCase__ = st.sidebar.slider(
"Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
UpperCAmelCase__ = st.sidebar.slider(
"Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
UpperCAmelCase__ = None
# start main text
questions_list = [
"<MY QUESTION>",
"How do people make chocolate?",
"Why do we get a fever when we are sick?",
"How can different animals perceive different colors?",
"What is natural language processing?",
"What's the best way to treat a sunburn?",
"What exactly are vitamins ?",
"How does nuclear energy provide electricity?",
"What's the difference between viruses and bacteria?",
"Why are flutes classified as woodwinds when most of them are made out of metal ?",
"Why do people like drinking coffee even though it tastes so bad?",
"What happens when wine ages? How does it make the wine taste better?",
"If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
"How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
"How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
"What would you like to ask? ---- select <MY QUESTION> to enter a new query",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s
if st.button("Show me!"):
if action in [0, 1, 3]:
if index_type == "mixed":
UpperCAmelCase__ , UpperCAmelCase__ = make_support(question, source=wiki_source, method="dense", n_results=10)
UpperCAmelCase__ , UpperCAmelCase__ = make_support(question, source=wiki_source, method="sparse", n_results=10)
UpperCAmelCase__ = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
        answer, support_list = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == "sampled"),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("### The model generated answer is:")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
for i, res in enumerate(support_list):
UpperCAmelCase__ = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
UpperCAmelCase__ = res[1].strip()
if sec_titles == "":
UpperCAmelCase__ = "[{}]({})".format(res[0], wiki_url)
else:
UpperCAmelCase__ = sec_titles.split(" & ")
UpperCAmelCase__ = " & ".join(
["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
)
st.markdown(
"{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
)
if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
st.markdown(
"--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
)
        answers_st = [
"{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
if i == 0 or sc > 2
]
st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
UpperCAmelCase__ = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
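
# Minimal sketch (added) of the dense-retrieval core used above; it relies only on
# the `faiss` and `numpy` imports this script already requires. `IndexFlatIP`
# performs exact max-inner-product search over the stored embeddings.
_demo_dim = 128
_demo_passages = np.random.rand(1_000, _demo_dim).astype("float32")
_demo_index = faiss.IndexFlatIP(_demo_dim)
_demo_index.add(_demo_passages)
_demo_query = np.random.rand(1, _demo_dim).astype("float32")
_demo_scores, _demo_ids = _demo_index.search(_demo_query, 10)  # top-10 row ids by inner product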
| 26 | 1 |
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, )
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()

    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="""MRA does not output attentions""")
    def test_attention_outputs(self):
        return


@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("""uw-madison/mra-base-512-4""")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0_140, 0.0_830, -0.0_381], [0.1_546, 0.1_402, 0.0_220], [0.1_162, 0.0_851, 0.0_165]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-512-4""")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50_265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2_595, -3.6_038, 11.8_819], [9.3_869, -3.2_693, 11.0_956], [11.8_524, -3.4_938, 13.1_210]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-4096-8-d3""")
        input_ids = torch.arange(4_096).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50_265
        expected_shape = torch.Size((1, 4_096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4_789, -2.3_564, 7.5_064], [7.9_067, -1.3_369, 9.9_668], [9.0_712, -1.8_106, 7.0_380]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
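
# Sketch (added) of the random-id helper pattern the tester above relies on
# (`ids_tensor` in transformers' test utilities); `_make_ids` here is an
# illustrative stand-in, not the library function.
def _make_ids(shape, vocab_size):
    # uniform random token ids in [0, vocab_size)
    return torch.randint(low=0, high=vocab_size, size=shape, dtype=torch.long)

_demo_ids = _make_ids((2, 8), vocab_size=99)
assert _demo_ids.shape == (2, 8) and int(_demo_ids.max()) < 99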
| 291 |
"""simple docstring"""
def circle_sort(collection: list) -> list:
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        swapped = False

        if low == high:
            return swapped

        left = low
        right = high

        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True

            left += 1
            right -= 1

        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True

        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)

        return swapped or left_swap or right_swap

    is_not_sorted = True

    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)

    return collection


if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
    print(circle_sort(unsorted))
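
    # Added sanity check (illustrative): circle sort must agree with Python's
    # built-in sorted() on a random sample.
    import random

    sample = [random.randint(-100, 100) for _ in range(50)]
    assert circle_sort(list(sample)) == sorted(sample)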
| 291 | 1 |
def mf_knapsack(i, wt, val, j):
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val_ = mf_knapsack(i - 1, wt, val, j)
        else:
            val_ = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val_
    return f[i][j]


def knapsack(w, wt, val, n):
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w_], dp


def knapsack_with_example_solution(w, wt, val):
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples")

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"""But got {num_items} weights and {len(val)} values"""
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"""type {type(wt[i])} at index {i}"""
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp, wt, i, j, optimal_set):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("""optimal_value = """, optimal_solution)
    print("""An optimal subset corresponding to the optimal value""", optimal_subset)
| 371 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model"""}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """bert_for_seq_generation""": (
            """https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"""
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"""bert_for_seq_generation""": 512}


class BertGenerationTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sep_token=sep_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 256 | 0 |
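# Usage sketch (added; the model path below is a placeholder — a real SentencePiece
# file is required, so the lines stay commented):
#
# tokenizer = BertGenerationTokenizer(vocab_file="spiece.model")
# ids = tokenizer("Hello world")["input_ids"]
# text = tokenizer.decode(ids)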
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    parser.add_argument(
        '--txt2img_unclip',
        default='kakaobrain/karlo-v1-alpha',
        type=str,
        required=False,
        help='The pretrained txt2img unclip.',
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
| 117 |
'''simple docstring'''
def solution(n: int = 100) -> int:
    collect_powers = set()

    current_pow = 0

    n = n + 1  # maximum limit

    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)


if __name__ == "__main__":
    print("Number of terms ", solution(int(str(input()).strip())))
| 139 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'shi-labs/nat-mini-in1k-224': 'https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json',
    # See all Nat models at https://huggingface.co/models?filter=nat
}


class NatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = '''nat'''

    attribute_map = {
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["""stem"""] + [f'''stage{idx}''' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
| 283 |
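# Usage sketch (added; assumes the reconstruction above matches the released
# `NatConfig`): the derived attributes follow from `depths` and `embed_dim`.
#
# config = NatConfig()
# assert config.num_layers == 4
# assert config.hidden_size == 64 * 2 ** 3  # 512
# assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]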
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = """backbone.""" if is_semantic else """"""

    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(f'''{prefix}cls_token''', """beit.embeddings.cls_token"""),
(f'''{prefix}patch_embed.proj.weight''', """beit.embeddings.patch_embeddings.projection.weight"""),
(f'''{prefix}patch_embed.proj.bias''', """beit.embeddings.patch_embeddings.projection.bias"""),
(f'''{prefix}pos_embed''', """beit.embeddings.position_embeddings"""),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("""mask_token""", """beit.embeddings.mask_token"""),
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = """backbone.""" if is_semantic else """"""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f'''{prefix}blocks.{i}.attn.qkv.weight''')
        q_bias = state_dict.pop(f'''{prefix}blocks.{i}.attn.q_bias''')
        v_bias = state_dict.pop(f'''{prefix}blocks.{i}.attn.v_bias''')

        state_dict[f'''beit.encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.query.bias'''] = q_bias
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.value.bias'''] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f'''{prefix}blocks.{i}.gamma_1''')
        gamma_2 = state_dict.pop(f'''{prefix}blocks.{i}.gamma_2''')
        state_dict[f'''beit.encoder.layer.{i}.lambda_1'''] = gamma_1
        state_dict[f'''beit.encoder.layer.{i}.lambda_2'''] = gamma_2
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    has_lm_head = False if """rvlcdip""" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1_0_2_4
        config.intermediate_size = 4_0_9_6
        config.num_hidden_layers = 2_4
        config.num_attention_heads = 1_6

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 1_6
        repo_id = """huggingface/label-files"""
        filename = """rvlcdip-id2label.json"""
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="""dataset"""), """r"""))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="""cpu""")["""model"""]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False)
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="""pt""")
    pixel_values = encoding["""pixel_values"""]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 1_6] if """rvlcdip""" in checkpoint_url else [1, 1_9_6, 8_1_9_2]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'''Saving model to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = """dit-base""" if """base""" in checkpoint_url else """dit-large"""
        else:
            model_name = """dit-base-finetuned-rvlcdip""" if """dit-b""" in checkpoint_url else """dit-large-finetuned-rvlcdip"""
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="""nielsr""", commit_message="""Add image processor""", use_temp_dir=True, )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="""nielsr""", commit_message="""Add model""", use_temp_dir=True, )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--checkpoint_url',
        default='https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth',
        type=str,
        help='URL to the original PyTorch checkpoint (.pth file).',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
    )
    parser.add_argument(
        '--push_to_hub',
        action='store_true',
    )
    args = parser.parse_args()

    convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 283 | 1 |
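# Toy demonstration (added) of the `rename_key` helper defined above: it moves a
# value to a new key in place, mirroring how checkpoint tensors are re-homed.
#
# state = {"old.name": 1}
# rename_key(state, "old.name", "new.name")
# assert state == {"new.name": 1}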
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def rename_keys(s_dict):
    '''simple docstring'''
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        layer_to_block_of_layer = r"(encoder|decoder)\/"

        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)

            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f'{key} -> {new_key}')
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weihts = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weihts[idx]
            print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")

            s_dict.pop(key)

    return s_dict
GIN_TO_CONFIG_MAPPING = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def convert_gin_to_config(gin_file, num_experts):
    '''simple docstring'''
    # Convert a Google-style gin config to the Hugging Face format
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    '''simple docstring'''
    print(f'Loading flax weights from : {flax_checkpoint_path}')
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f'Save PyTorch model to {pytorch_dump_path}')
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--switch_t5x_checkpoint_path''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'''
            ''' model architecture. If not provided, a `gin_file` has to be provided.'''
        ),
    )
    parser.add_argument(
        '''--gin_file''',
        default=None,
        type=str,
        required=False,
        help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
    )
    parser.add_argument(
        '''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
    )
    parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
    args = parser.parse_args()

    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
        args.config_name,
        args.gin_file,
        args.pytorch_dump_folder_path,
        args.num_experts,
    )
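
    # Added sketch of the gin-parsing regex used in `convert_gin_to_config` above,
    # applied to a toy config string (the standard-library `re` module behaves the
    # same as the `regex` package for this pattern).
    import re as _re

    _toy_gin = "NUM_HEADS = 12\nEMBED_DIM = 768\n"
    assert _re.findall(r"(.*) = ([0-9.]*)", _toy_gin) == [("NUM_HEADS", "12"), ("EMBED_DIM", "768")]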
| 22 |
'''simple docstring'''
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f'You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.')

    @classmethod
    def pip_install(cls):
        return f'`pip install {cls.pip_package or cls.name}`'


class OptunaBackend(HyperParamSearchBackendBase):
    name = """optuna"""

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = """ray"""
    pip_package = """'ray[tune]'"""

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = """sigopt"""

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = """wandb"""

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend() -> str:
    '''simple docstring'''
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f'{len(available_backends)} hyperparameter search backends available. Using {name} as the default.')
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f' - To install {backend.name} run {backend.pip_install()}'
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()))
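
# Illustrative miniature (added) of the selection logic in
# `default_hp_search_backend` above, using stub classes so it runs with no
# optional dependencies installed.
class _StubUnavailable:
    name = "unavailable"

    @staticmethod
    def is_available():
        return False


class _StubAvailable:
    name = "available"

    @staticmethod
    def is_available():
        return True


_stub_backends = [_StubUnavailable, _StubAvailable]
_first = next(b for b in _stub_backends if b.is_available())
assert _first.name == "available"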
| 22 | 1 |
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 5_0))

    def get_scheduler_config(self, **kwargs):
        config = {
            """num_train_timesteps""": 1000,
            """beta_start""": 0.0001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
            """clip_sample""": True,
        }

        config.update(**kwargs)
        return config

    def full_loop(self, **kwargs):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**kwargs)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample
    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5
def a_ ( self ):
UpperCamelCase : Dict = self.scheduler_classes[0]
UpperCamelCase : str = self.get_scheduler_config()
UpperCamelCase : List[str] = scheduler_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase , UpperCamelCase : int = 10, 0.0
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = self.dummy_model()
UpperCamelCase : Optional[int] = self.dummy_sample_deter
UpperCamelCase : Optional[Any] = self.dummy_sample_deter + 0.1
UpperCamelCase : Any = self.dummy_sample_deter - 0.1
UpperCamelCase : Any = samplea.shape[0]
UpperCamelCase : Any = torch.stack([samplea, samplea, samplea] , dim=0 )
UpperCamelCase : int = torch.arange(SCREAMING_SNAKE_CASE_ )[0:3, None].repeat(1 , SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
UpperCamelCase : Optional[int] = scheduler.batch_step_no_noise(SCREAMING_SNAKE_CASE_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase : Tuple = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 1147.7904 ) < 1e-2
assert abs(result_mean.item() - 0.4982 ) < 1e-3
def a_ ( self ):
UpperCamelCase : Tuple = self.full_loop()
UpperCamelCase : str = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase : List[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 172.0067 ) < 1e-2
assert abs(result_mean.item() - 0.223967 ) < 1e-3
def a_ ( self ):
UpperCamelCase : Tuple = self.full_loop(prediction_type="""v_prediction""" )
UpperCamelCase : Tuple = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase : Dict = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 52.5302 ) < 1e-2
assert abs(result_mean.item() - 0.0684 ) < 1e-3
def a_ ( self ):
# We specify different beta, so that the first alpha is 0.99
UpperCamelCase : Union[str, Any] = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE_ , beta_start=0.01 )
UpperCamelCase : Any = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase : str = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 149.8295 ) < 1e-2
assert abs(result_mean.item() - 0.1951 ) < 1e-3
def a_ ( self ):
# We specify different beta, so that the first alpha is 0.99
UpperCamelCase : Optional[Any] = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE_ , beta_start=0.01 )
UpperCamelCase : Union[str, Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase : str = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 149.0784 ) < 1e-2
assert abs(result_mean.item() - 0.1941 ) < 1e-3
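# Standalone sketch (not part of the test suite, never called): driving the
# scheduler with a random stand-in for the UNet prediction; the shape and step
# count are illustrative.
def _demo_parallel_ddim():
    scheduler = DDIMParallelScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        noise_pred = torch.randn_like(sample)  # stand-in for a model output
        sample = scheduler.step(noise_pred, t, sample, eta=0.0).prev_sample
    return sample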
| 27 |
"""simple docstring"""
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)
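# Usage sketch (illustrative checkpoint name; this helper is never called by
# the script): wrap any iterable of {"content": str} records into fixed-length
# token chunks.
def _demo_constant_length_dataset():
    demo_tokenizer = AutoTokenizer.from_pretrained("gpt2")
    demo_stream = [{"content": "print('hello world')"} for _ in range(100)]
    demo_chunks = ConstantLengthDataset(demo_tokenizer, demo_stream, seq_length=32)
    return next(iter(demo_chunks))  # a LongTensor of shape (32,)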
def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader
def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()
# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
| 27 | 1 |
'''simple docstring'''
from manim import *
class BigModelInference(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
        meta_mem = Rectangle(height=0.25, width=0.25)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        model_arr = []
        model_cpu_arr = []

        for i, rect in enumerate(model_base):
            target = fill.copy().set_fill(BLUE, opacity=0.8)
            target.move_to(rect)
            model_arr.append(target)

            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(BLUE, opacity=0.8)
            cpu_target.move_to(cpu_left_col_base[i])
            model_cpu_arr.append(cpu_target)

        self.add(*model_arr, *model_cpu_arr)

        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4, -1.25, 0])
        self.add(disk_text, disk_rects)

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model", font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint", font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_6 = MarkupText(
            "Now watch as an input is passed through the model\nand how the memory is utilized and handled.", font_size=24,
        )
        step_6.move_to([2, 2, 0])
        self.play(Write(step_6))

        input = Square(0.3)
        input.set_fill(RED, opacity=1.0)
        input.set_stroke(width=0.0)
        input.next_to(model_base[0], LEFT, buff=0.5)
        self.play(Write(input))

        input.generate_target()
        input.target.next_to(model_arr[0], direction=LEFT, buff=0.02)
        self.play(MoveToTarget(input))
        self.play(FadeOut(step_6))

        a_1 = Arrow(start=UP, end=DOWN, color=RED, buff=0.5)
        a_1.next_to(model_arr[0].get_left(), UP, buff=0.2)

        model_cpu_arr[0].generate_target()
        model_cpu_arr[0].target.move_to(gpu_rect[0])

        step_7 = MarkupText(
            "As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.", font_size=24,
        )
        step_7.move_to([2, 2, 0])
        self.play(Write(step_7, run_time=3))

        circ_kwargs = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.02}
        self.play(
            Write(a_1), Circumscribe(model_arr[0], color=ORANGE, **circ_kwargs), Circumscribe(model_cpu_arr[0], color=ORANGE, **circ_kwargs), Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
        )
        self.play(MoveToTarget(model_cpu_arr[0]))

        a_c = a_1.copy()
        for i in range(6):
            a_c.next_to(model_arr[i].get_right() + 0.02, UP, buff=0.2)

            input.generate_target()
            input.target.move_to(model_arr[i].get_right() + 0.02)

            grp = AnimationGroup(
                FadeOut(a_c, run_time=0.5), MoveToTarget(input, run_time=0.5), FadeIn(a_c, run_time=0.5), lag_ratio=0.2
            )
            self.play(grp)

            model_cpu_arr[i].generate_target()
            model_cpu_arr[i].target.move_to(cpu_left_col_base[i])

            if i < 5:
                model_cpu_arr[i + 1].generate_target()
                model_cpu_arr[i + 1].target.move_to(gpu_rect[0])
                if i >= 1:
                    circ_kwargs["run_time"] = 0.7
                self.play(
                    Circumscribe(model_arr[i], **circ_kwargs), Circumscribe(cpu_left_col_base[i], **circ_kwargs), Circumscribe(cpu_left_col_base[i + 1], color=ORANGE, **circ_kwargs), Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs), Circumscribe(model_arr[i + 1], color=ORANGE, **circ_kwargs),
                )
                if i < 1:
                    self.play(
                        MoveToTarget(model_cpu_arr[i]), MoveToTarget(model_cpu_arr[i + 1]),
                    )
                else:
                    self.play(
                        MoveToTarget(model_cpu_arr[i], run_time=0.7), MoveToTarget(model_cpu_arr[i + 1], run_time=0.7),
                    )
            else:
                model_cpu_arr[i].generate_target()
                model_cpu_arr[i].target.move_to(cpu_left_col_base[-1])
                input.generate_target()
                input.target.next_to(model_arr[-1].get_right(), RIGHT + 0.02, buff=0.2)

                self.play(
                    Circumscribe(model_arr[-1], color=ORANGE, **circ_kwargs), Circumscribe(cpu_left_col_base[-1], color=ORANGE, **circ_kwargs), Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                )
                self.play(MoveToTarget(model_cpu_arr[i]))

        a = a_c
        a_c = a_c.copy()

        input.generate_target()
        input.target.next_to(model_base[-1], RIGHT + 0.02, buff=0.5)
        self.play(
            FadeOut(step_7), FadeOut(a_c, run_time=0.5),
        )

        step_8 = MarkupText("Inference on a model too large for GPU memory\nis successfully completed.", font_size=24)
        step_8.move_to([2, 2, 0])
        self.play(Write(step_8, run_time=3), MoveToTarget(input))
        self.wait()
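# Rendering sketch (file name is illustrative): with the manim CLI installed,
# `manim -pql big_model_inference.py BigModelInference` previews this scene.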
| 198 |
'''simple docstring'''
import requests
__a: str = """https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="""
def __UpperCamelCase ( UpperCAmelCase ):
# fetching a list of articles in json format
lowercase__ : Optional[Any] = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page['''articles'''] , 1 ):
print(F"""{i}.) {article['title']}""" )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="""<Your BBC News API key goes here>""")
| 198 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_gpt_bigcode': ['GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTBigCodeConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
'GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTBigCodeForSequenceClassification',
'GPTBigCodeForTokenClassification',
'GPTBigCodeForCausalLM',
'GPTBigCodeModel',
'GPTBigCodePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
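# With the _LazyModule registered above, importing GPTBigCodeModel from this
# package only triggers the torch-dependent import the first time the attribute
# is accessed; static type checkers still see the real symbols via TYPE_CHECKING.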
| 359 |
"""simple docstring"""
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total
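# Worked check: for the arithmetic series 1, 2, ..., 10 the formula gives
# (10 / 2) * (2 * 1 + 9 * 1) = 55.0.
assert sum_of_series(1, 1, 10) == 55.0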
def main():
print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 86 | 0 |
'''simple docstring'''
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """Return indices of two entries of `nums` that sum to `target`, else []."""
    i = 0
    j = len(nums) - 1

    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1

    return []
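# Note: the two-pointer scan assumes `nums` is sorted in ascending order; for
# the demo input [2, 7, 11, 15] with target 9 it returns [0, 1].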
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{two_pointer([2, 7, 11, 15], 9) = }""")
| 79 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCamelCase_ = {"""input_ids""": [[0, 4_9_0, 1_4_3_2_8, 4_5_0_7, 3_5_4, 4_7, 4_3_6_6_9, 9_5, 2_5, 7_8_1_1_7, 2_0_2_1_5, 1_9_7_7_9, 1_9_0, 2_2, 4_0_0, 4, 3_5_3_4_3, 8_0_3_1_0, 6_0_3, 8_6, 2_4_9_3_7, 1_0_5, 3_3_4_3_8, 9_4_7_6_2, 1_9_6, 3_9_6_4_2, 7, 1_5, 1_5_9_3_3, 1_7_3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0_5_3_4, 8_7, 2_5, 6_6, 3_3_5_8, 1_9_6, 5_5_2_8_9, 8, 8_2_9_6_1, 8_1, 2_2_0_4, 7_5_2_0_3, 7, 1_5, 7_6_3, 1_2_9_5_6, 2_1_6, 1_7_8, 1_4_3_2_8, 9_5_9_5, 1_3_7_7, 6_9_6_9_3, 7, 4_4_8, 7_1_0_2_1, 1_9_6, 1_8_1_0_6, 1_4_3_7, 1_3_9_7_4, 1_0_8, 9_0_8_3, 4, 4_9_3_1_5, 7, 3_9, 8_6, 1_3_2_6, 2_7_9_3, 4_6_3_3_3, 4, 4_4_8, 1_9_6, 7_4_5_8_8, 7, 4_9_3_1_5, 7, 3_9, 2_1, 8_2_2, 3_8_4_7_0, 7_4, 2_1, 6_6_7_2_3, 6_2_4_8_0, 8, 2_2_0_5_0, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=UpperCamelCase_, model_name="moussaKam/mbarthez", revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6", sequences=sequences,
        )
| 122 | 0 |
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.sourceIndex = None
        self.sinkIndex = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.verticesCount = len(graph)
        self.maximum_flow_algorithm = None

    # make only one source and one sink
    def _normalize_graph(self, sources, sinks):
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.sourceIndex = sources[0]
        self.sinkIndex = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.sourceIndex = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sinkIndex = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.sourceIndex is None or self.sinkIndex is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)


class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.verticesCount
        self.source_index = flow_network.sourceIndex
        self.sink_index = flow_network.sinkIndex
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # You should override it in a subclass
    def _algorithm(self):
        pass


class FlowNetworkAlgorithmExecutorWithResult(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow


class PushRelabelExecutor(FlowNetworkAlgorithmExecutorWithResult):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]
        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count

    def _algorithm(self):
        self.heights[self.source_index] = self.verticies_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1


if __name__ == "__main__":
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]

    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()
    print(f"maximum flow is {maximum_flow}")
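# For the sample graph above the only augmenting path is 0 -> 1 -> 2 -> 3,
# bottlenecked by the capacity-6 edge 1 -> 2, so this prints "maximum flow is 6".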
| 211 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=f"val_{metric}", mode="max", save_top_k=3, every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}", mode="min" if "loss" in metric else "max", patience=patience, verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 211 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 310 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    return (preds == labels).mean()


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator, )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 310 | 1 |
'''simple docstring'''
def solution() -> str:
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
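# The last ten digits of 1^1 + 2^2 + ... + 1000^1000 are 9110846700, the
# well-known Project Euler 48 result.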
if __name__ == "__main__":
print(solution())
| 147 |
'''simple docstring'''
def decimal_to_fraction(decimal: int | float | str) -> tuple[int, int]:
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
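# Worked example: 0.25 has two fractional digits, so it becomes 25/100;
# Euclid's loop finds gcd(100, 25) = 25 and the reduced result is (1, 4).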
if __name__ == "__main__":
print(F'''{decimal_to_fraction(2) = }''')
print(F'''{decimal_to_fraction(89.0) = }''')
print(F'''{decimal_to_fraction('67') = }''')
print(F'''{decimal_to_fraction('45.0') = }''')
print(F'''{decimal_to_fraction(1.5) = }''')
print(F'''{decimal_to_fraction('6.25') = }''')
print(F'''{decimal_to_fraction('78td') = }''')
| 147 | 1 |
'''simple docstring'''
import torch
from diffusers import DiffusionPipeline
class CustomLocalPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1

        model_output = self.unet(image, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample

        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)

        return result
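# Hedged usage sketch (model sizes are illustrative, helper is never called):
# the pipeline only needs a UNet-style model and any scheduler with a `step`.
def _demo_custom_pipeline():
    from diffusers import DDPMScheduler, UNet2DModel

    unet = UNet2DModel(sample_size=8, in_channels=3, out_channels=3)
    pipe = CustomLocalPipeline(unet=unet, scheduler=DDPMScheduler())
    return pipe().shape  # torch.Size([1, 3, 8, 8])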
| 37 |
def solution(n: int = 1_000_000) -> int:
    """Find the starting number below `n` producing the longest Collatz chain."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}  # memoized chain lengths

    for input1 in range(2, n):
        counter = 0
        number = input1

        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1

        if input1 not in counters:
            counters[input1] = counter

        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter

    return largest_number
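# For the standard bound of one million, the longest chain starts at 837799,
# the well-known Project Euler 14 answer.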
if __name__ == "__main__":
print(solution(int(input().strip())))
| 303 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"caidas/swin2sr-classicalsr-x2-64": (
"https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
),
}
class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"

    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
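# Minimal usage sketch (never called here; values are illustrative, not a
# released checkpoint): only fields that differ from the defaults are needed.
def _demo_swin2sr_config():
    config = Swin2SRConfig(upscale=4)
    return config.num_layers, config.upsampler  # (6, "pixelshuffle")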
| 361 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 308 | 0 |
"""simple docstring"""
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process, args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]), ) )
    temp_ls = temp_rs
    temp_lr = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process, args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]), ) )
        temp_ls = temp_rs
        temp_lr = temp_rr

    process_array_.append(
        Process(
            target=oe_process, args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ), ) )

    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
if __name__ == "__main__":
main()
| 46 |
"""simple docstring"""
def different_signs(num1: int, num2: int) -> bool:
    """Return True iff `num1` and `num2` have opposite signs."""
    return num1 ^ num2 < 0
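# XOR of two ints is negative exactly when the operands' sign bits differ,
# independent of magnitude:
assert different_signs(1, -1)
assert not different_signs(1, 1)
assert not different_signs(-1, -1)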
if __name__ == "__main__":
import doctest
doctest.testmod()
| 293 | 0 |
'''simple docstring'''
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]

            if remainder % 2 == 0:
                return 0
            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length
            )
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1

        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length,
            )
    return result


def solution(max_power: int = 9) -> int:
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
| 222 |
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'good second issue',
'good difficult issue',
'feature request',
'new model',
'wip',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='''closed''' )
elif (
(dt.utcnow() - issue.updated_at).days > 2_3
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
    main()
| 222 | 1 |
'''simple docstring'''
__author__ = "Alexander Joslin"
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()

            total = operators[opr](num2, num1)
            operand_stack.push(total)

    # RULE 5
    return operand_stack.peek()
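# Trace for "(5 + ((4 * 2) * (2 + 3)))": the inner ")" reduces 4 * 2 = 8 and
# 2 + 3 = 5, the next ")" reduces 8 * 5 = 40, and the outer ")" leaves
# 5 + 40 = 45 on the operand stack.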
if __name__ == "__main__":
_A : Optional[Any] ='''(5 + ((4 * 2) * (2 + 3)))'''
# answer = 45
print(F'{equation} = {dijkstras_two_stack_algorithm(equation)}')
| 41 |
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 100  # 1000 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
"""No of Comparisons for 100 elements selected from a standard normal distribution"""
"""is :"""
)
print(z)
| 241 | 0 |
'''simple docstring'''
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups
return key_no_dups
def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ):
return "".join(cipher_map.get(lowerCAmelCase_ , lowerCAmelCase_ ) for ch in message.upper() )
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ):
lowercase__ : List[Any] = {v: k for k, v in cipher_map.items()}
return "".join(rev_cipher_map.get(lowerCAmelCase_ , lowerCAmelCase_ ) for ch in message.upper() )
def __UpperCamelCase ( ):
lowercase__ : Optional[int] = input('''Enter message to encode or decode: ''' ).strip()
lowercase__ : Union[str, Any] = input('''Enter keyword: ''' ).strip()
lowercase__ : List[str] = input('''Encipher or decipher? E/D:''' ).strip()[0].lower()
try:
lowercase__ : Dict = {'e': encipher, 'd': decipher}[option]
except KeyError:
raise KeyError('''invalid input option''' )
lowercase__ : List[Any] = create_cipher_map(lowerCAmelCase_ )
print(func(lowerCAmelCase_ , lowerCAmelCase_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
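    # Round-trip sketch (the keyword "KEYWORD" is illustrative, not part of
    # the original CLI): enciphering then deciphering with the same map
    # reproduces the upper-cased input.
    # >>> cipher_map = create_cipher_map("KEYWORD")
    # >>> decipher(encipher("Hello", cipher_map), cipher_map)
    # 'HELLO'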
| 369 | '''simple docstring'''
def is_isogram(string: str) -> bool:
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))


if __name__ == "__main__":
    input_str = input("Enter a string ").strip()
    isogram = is_isogram(input_str)
    print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
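    # Expected behaviour, as doctest-style comments: "Uncopyrightable" has no
    # repeated letters, while "allowance" repeats 'a' and 'l'.
    # >>> is_isogram("Uncopyrightable")
    # True
    # >>> is_isogram("allowance")
    # False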
| 214 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
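# Hypothetical usage sketch (not part of the class above; assumes the
# checkpoint has been downloaded and `audio_array` is a 16 kHz mono waveform):
# tool = SpeechToTextTool()
# tool.setup()
# text = tool(audio_array)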
| 245 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50


@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)

    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])

        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)

    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
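# Run note (the test-file path is illustrative of the datasets repo layout):
# python -m pytest tests/packaged_modules/test_spark.py -k repartition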
| 245 | 1 |
def solution(max_perimeter: int = 10**9) -> int:
    prev_value = 1
    value = 2

    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter

        prev_value += 2 * value
        value += prev_value

        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum


if __name__ == "__main__":
    print(f"{solution() = }")
| 370 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
    'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_roc_bert'] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
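    # Lazy-import sketch (illustrative): the module body above only registers
    # names, so `from transformers import RoCBertConfig` resolves through the
    # _LazyModule and defers loading the heavy modeling code until first use.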
| 301 | 0 |
'''simple docstring'''
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec('''s3fs''') is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f'A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.')
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)


def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the protocol prefix (e.g. "s3://") from a dataset path."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split('''://''')[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """Return True if `fs` is anything other than the local filesystem."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """Clear fsspec's async loop and lock references (needed on some fsspec versions)."""
    if hasattr(fsspec.asyn, '''reset_lock'''):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
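# Usage sketch (paths are illustrative; the second call assumes an fsspec
# version where LocalFileSystem.protocol == "file"):
# >>> extract_path_from_uri("s3://my-bucket/datasets/train")
# 'my-bucket/datasets/train'
# >>> is_remote_filesystem(fsspec.filesystem("file"))
# False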
| 104 |
'''simple docstring'''
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    parser.add_argument(
        '''--txt2img_unclip''',
        default='''kakaobrain/karlo-v1-alpha''',
        type=str,
        required=False,
        help='''The pretrained txt2img unclip.''',
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('''openai/clip-vit-large-patch14''')

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
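    # Example invocation (the script and output names are illustrative):
    # python convert_unclip_to_image_variation.py --dump_path ./karlo-image-variations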
| 104 | 1 |
"""simple docstring"""
def kth_permutation(k: int, n: int) -> list[int]:
    """Return the k-th (0-indexed) lexicographic permutation of 0, 1, ..., n - 1."""
    # Factorials from 1! to (n - 1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])

    return permutation
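# Worked example: for n = 3 and k = 4, divmod peels off factorial digits
# (2, 0), so the 4th (0-indexed) permutation of [0, 1, 2] is [2, 0, 1].
# >>> kth_permutation(4, 3)
# [2, 0, 1]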
if __name__ == "__main__":
import doctest
doctest.testmod()
| 355 |
import gc
import unittest
import numpy as np
import torch
from diffusers import (
    AudioDiffusionPipeline,
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    DiffusionPipeline,
    Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            sample_size=(32, 64),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return model

    @property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            cross_attention_dim=10,
        )
        return model

    @property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64),
            in_channels=1,
            out_channels=1,
            latent_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
        )
        unet = UNet2DModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return vqvae, unet

    @slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1],
            y_res=self.dummy_unet.config.sample_size[0],
        )
        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]

        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0

        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1],
            y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0],
        )
        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]

        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0

        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]

        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]

        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
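# Run note (the test-file path is illustrative of the diffusers repo layout):
# python -m pytest tests/pipelines/audio_diffusion/test_audio_diffusion.py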
| 207 | 0 |
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
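# Usage sketch (arguments are illustrative): these re-exports let callers build
# the legacy task datasets directly, e.g.
# GlueDataset(GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC"), tokenizer=tokenizer)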
| 130 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """
    Helper function parsing the command line options.
    """
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
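# Example launch (script path and flags are illustrative):
# python xla_spawn.py --num_cores 8 examples/pytorch/text-classification/run_glue.py --model_name_or_path bert-base-cased ...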
| 130 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
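    # Usage sketch: DeformableDetrConfig() builds the default configuration;
    # `config.num_attention_heads` resolves to `encoder_attention_heads` (8)
    # via the attribute_map / property above.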
    def to_dict(self) -> dict:
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output | 171 | 0 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image
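# prepare_img pulls a small ADE20K validation image from the Hub; the
# integration tests below compare model logits on it against precomputed
# slices, so any processor or weight regression shows up as a mismatch.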
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)) | 171 | 0 |
'''simple docstring'''
a_ : Optional[int] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def _A () -> None:
'''simple docstring'''
_a = input('Enter message: ' )
_a = input('Enter key [alphanumeric]: ' )
_a = input('Encrypt/Decrypt [e/d]: ' )
if mode.lower().startswith('e' ):
_a = 'encrypt'
_a = encrypt_message(lowerCAmelCase__ , lowerCAmelCase__ )
elif mode.lower().startswith('d' ):
_a = 'decrypt'
_a = decrypt_message(lowerCAmelCase__ , lowerCAmelCase__ )
print(f'\n{mode.title()}ed message:' )
print(lowerCAmelCase__ )
def _A (lowerCAmelCase__ :str , lowerCAmelCase__ :str ) -> str:
'''simple docstring'''
return translate_message(lowerCAmelCase__ , lowerCAmelCase__ , 'encrypt' )
def _A (lowerCAmelCase__ :str , lowerCAmelCase__ :str ) -> str:
'''simple docstring'''
return translate_message(lowerCAmelCase__ , lowerCAmelCase__ , 'decrypt' )
def _A (lowerCAmelCase__ :str , lowerCAmelCase__ :str , lowerCAmelCase__ :str ) -> str:
'''simple docstring'''
_a = []
_a = 0
_a = key.upper()
for symbol in message:
_a = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(lowerCAmelCase__ )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(lowerCAmelCase__ ):
_a = 0
else:
translated.append(lowerCAmelCase__ )
return "".join(lowerCAmelCase__ )
if __name__ == "__main__":
main()
| 168 |
'''simple docstring'''
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class VisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f'Difference between torch and flax is {diff} (>= {tol}).')

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        model = FlaxVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output['text_embeds'].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output['image_embeds'].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {'vision_model': vision_model, 'text_model': text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output['text_embeds'].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output['image_embeds'].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {'vision_model': vision_model, 'text_model': text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0]

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-3)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {'vision_model': vision_model, 'text_model': text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def check_pt_flax_equivalence(self, pt_model, fx_model, inputs_dict):
        pt_model.to(torch_device)
        pt_model.eval()

        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}

        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs).to_tuple()

        fx_outputs = fx_model(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs), 'Output lengths differ between Flax and PyTorch')
        for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)

        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname)
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname, from_pt=True)

        fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), 'Output lengths differ between Flax and PyTorch')
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2)

        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname)
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname, from_flax=True)

        pt_model_loaded.to(torch_device)
        pt_model_loaded.eval()

        with torch.no_grad():
            pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

        self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), 'Output lengths differ between Flax and PyTorch')
        for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
            self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 4e-2)

    def check_equivalence_pt_to_flax(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
        fx_model.params = fx_state

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def check_equivalence_flax_to_pt(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @is_pt_flax_cross_test
    def test_pt_flax_equivalence(self):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop('vision_config')
        text_config = config_inputs_dict.pop('text_config')

        inputs_dict = config_inputs_dict

        self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict)
        self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model, inputs = self.get_pretrained_model_and_inputs()

        outputs = model(**inputs)
        out_1 = outputs[0]

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model.save_pretrained(tmp_dirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model(**inputs)
            out_2 = after_outputs[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)
@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            'hf-internal-testing/tiny-random-vit', 'hf-internal-testing/tiny-bert', vision_from_pt=True, text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            'hf-internal-testing/tiny-random-clip', 'hf-internal-testing/tiny-bert', vision_from_pt=True, text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class FlaxVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained('clip-italian/clip-italian', logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian')

        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        inputs = processor(
            text=['una foto di un gatto', 'una foto di un cane'], images=image, padding=True, return_tensors='np'
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]])

        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
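# The integration test above exercises the published clip-italian checkpoint
# end to end; the expected logits were precomputed, so a drift in either the
# processor or the model weights surfaces as a numeric mismatch.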
| 168 | 1 |
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
_lowerCamelCase : Any = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
# -------------------------- default selection --------------------------
# rotors --------------------------
_lowerCamelCase : Optional[int] = """EGZWVONAHDCLFQMSIPJBYUKXTR"""
_lowerCamelCase : List[str] = """FOBHMDKEXQNRAULPGSJVTYICZW"""
_lowerCamelCase : List[str] = """ZJXESIUQLHAVRMDOYGTNFWPBKC"""
# reflector --------------------------
reflector = {
"""A""": """N""",
"""N""": """A""",
"""B""": """O""",
"""O""": """B""",
"""C""": """P""",
"""P""": """C""",
"""D""": """Q""",
"""Q""": """D""",
"""E""": """R""",
"""R""": """E""",
"""F""": """S""",
"""S""": """F""",
"""G""": """T""",
"""T""": """G""",
"""H""": """U""",
"""U""": """H""",
"""I""": """V""",
"""V""": """I""",
"""J""": """W""",
"""W""": """J""",
"""K""": """X""",
"""X""": """K""",
"""L""": """Y""",
"""Y""": """L""",
"""M""": """Z""",
"""Z""": """M""",
}
# -------------------------- extra rotors --------------------------
_lowerCamelCase : str = """RMDJXFUWGISLHVTCQNKYPBEZOA"""
_lowerCamelCase : Optional[Any] = """SGLCPQWZHKXAREONTFBVIYJUDM"""
_lowerCamelCase : List[Any] = """HVSICLTYKQUBXDWAJZOMFGPREN"""
_lowerCamelCase : Union[str, Any] = """RZWQHFMVDBKICJLNTUXAGYPSOE"""
_lowerCamelCase : Dict = """LFKIJODBEGAMQPXVUHYSTCZRWN"""
_lowerCamelCase : Any = """KOAEGVDHXPQZMLFTYWJNBRCIUS"""
def _validator(
    rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        raise Exception(f"Please use 3 unique rotors (not {unique_rotsel})")

    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        raise ValueError(f"First rotor position is not within range of 1..26 ({rotorpos1})")
    if not 0 < rotorpos2 <= len(abc):
        raise ValueError(f"Second rotor position is not within range of 1..26 ({rotorpos2})")
    if not 0 < rotorpos3 <= len(abc):
        raise ValueError(f"Third rotor position is not within range of 1..26 ({rotorpos3})")

    # Validates string and returns dict
    pbdict = _plugboard(pb)

    return rotpos, rotsel, pbdict
def _plugboard(pbstring: str) -> dict[str, str]:
    """simple docstring"""
    if not isinstance(pbstring, str):
        msg = f"""Plugboard setting isn't type string ({type(pbstring)})"""
        raise TypeError(msg)
    elif len(pbstring) % 2 != 0:
        msg = f"""Odd number of symbols ({len(pbstring)})"""
        raise Exception(msg)
    elif pbstring == "":
        return {}
    pbstring = pbstring.replace(" ", "")
    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f"""'{i}' not in list of symbols"""
            raise Exception(msg)
        elif i in tmppbl:
            msg = f"""Duplicate symbol ({i})"""
            raise Exception(msg)
        else:
            tmppbl.add(i)
    del tmppbl
    # Create the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]
    return pb
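# Minimal sanity check for _plugboard above (an added example; every configured
# pair must map in both directions):
def _demo_plugboard() -> None:
    expected = {"P": "O", "O": "P", "L": "A", "A": "L", "N": "D", "D": "N"}
    assert _plugboard("POLAND") == expected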
def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    """simple docstring"""
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(
        rotor_position, rotor_selection, plugb.upper())

    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1

    result = []

    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]
            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor_a[index % len(abc)]
            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor_b[index % len(abc)]
            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor_c[index % len(abc)]
            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]
            # 2nd rotors (traversed in reverse order)
            symbol = abc[rotor_c.index(symbol) - rotorpos3]
            symbol = abc[rotor_b.index(symbol) - rotorpos2]
            symbol = abc[rotor_a.index(symbol) - rotorpos1]
            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]
            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
            rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
            rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0
        # else:
        #    pass
        #    Error could be also raised
        #    raise ValueError(
        #       'Invalid symbol('+repr(symbol)+')')
        result.append(symbol)
    return "".join(result)
if __name__ == "__main__":
_lowerCamelCase : Union[str, Any] = """This is my Python script that emulates the Enigma machine from WWII."""
_lowerCamelCase : str = (1, 1, 1)
_lowerCamelCase : Union[str, Any] = """pictures"""
_lowerCamelCase : List[Any] = (rotora, rotora, rotora)
_lowerCamelCase : Optional[Any] = enigma(message, rotor_pos, rotor_sel, pb)
print("""Encrypted message:""", en)
print("""Decrypted message:""", enigma(en, rotor_pos, rotor_sel, pb))
| 231 |
from __future__ import annotations
import queue
class TreeNode:
    '''simple docstring'''

    def __init__(self, data: int) -> None:
        '''simple docstring'''
        self.data = data
        self.right = None
        self.left = None
def build_tree() -> TreeNode:
    """simple docstring"""
    print('''\n********Press N to stop entering at any point of time********\n''' )
    check = input('''Enter the value of the root node: ''' ).strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"""Enter the left node of {node_found.data}: """
        check = input(msg).strip().lower() or '''n'''
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"""Enter the right node of {node_found.data}: """
        check = input(msg).strip().lower() or '''n'''
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise  # unreachable: the loop above always returns
def pre_order(node: TreeNode) -> None:
    """simple docstring"""
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=''',''' )
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    """simple docstring"""
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=''',''' )
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    """simple docstring"""
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=''',''' )
def level_order(node: TreeNode) -> None:
    """simple docstring"""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=''',''' )
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    """simple docstring"""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=''',''' )
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)
def pre_order_iter(node: TreeNode) -> None:
    """simple docstring"""
    if not isinstance(node, TreeNode) or not node:
        return
    stack = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=''',''' )
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    """simple docstring"""
    if not isinstance(node, TreeNode) or not node:
        return
    stack = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=''',''' )
        n = n.right
def post_order_iter(node: TreeNode) -> None:
    """simple docstring"""
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=''',''' )
def SCREAMING_SNAKE_CASE ( lowercase_ = "" , lowercase_=50 , lowercase_="*" ) -> str:
"""simple docstring"""
if not s:
return "\n" + width * char
A__ , A__ = divmod(width - len(lowercase_ ) - 2 , 2 )
return f"""{left * char} {s} {(left + extra) * char}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("""Binary Tree Traversals"""))
    node: TreeNode = build_tree()
print(prompt("""Pre Order Traversal"""))
pre_order(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal"""))
in_order(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal"""))
post_order(node)
print(prompt() + """\n""")
print(prompt("""Level Order Traversal"""))
level_order(node)
print(prompt() + """\n""")
print(prompt("""Actual Level Order Traversal"""))
level_order_actual(node)
print("""*""" * 50 + """\n""")
print(prompt("""Pre Order Traversal - Iteration Version"""))
pre_order_iter(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal - Iteration Version"""))
in_order_iter(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal - Iteration Version"""))
post_order_iter(node)
print(prompt())
| 231 | 1 |
'''simple docstring'''
import random
def partition(a: list, left_index: int, right_index: int) -> int:
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1
def quick_sort_random(a: list, left: int, right: int) -> None:
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index)  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right)  # recursive quicksort to the right of the pivot point
def main() -> None:
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    arr = [int(item) for item in user_input.split(""",""")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)
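# Deterministic usage sketch (an added example, not part of the original module;
# seeding the RNG makes the pivot choices, and thus the run, reproducible):
def _demo_quick_sort_random() -> None:
    random.seed(0)
    data = [5, 1, 4, 2, 3]
    quick_sort_random(data, 0, len(data))
    assert data == [1, 2, 3, 4, 5]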
if __name__ == "__main__":
main()
| 58 |
'''simple docstring'''
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """simple docstring"""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []
    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types] )
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14 )
    restype_atom37_to_atom14_list.append([0] * 37 )
    restype_atom14_mask_list.append([0.0] * 14 )
    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list , dtype=torch.int32 , device=protein["aatype"].device , )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list , dtype=torch.int32 , device=protein["aatype"].device , )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list , dtype=torch.float32 , device=protein["aatype"].device , )
    protein_aatype = protein["aatype"].to(torch.long )
    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]
    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()
    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()
    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37] , dtype=torch.float32 , device=protein["aatype"].device )
    for restype, restype_letter in enumerate(rc.restypes ):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1
    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask
    return protein
def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    """simple docstring"""
    batch = tree_map(lambda n: torch.tensor(n , device=batch["aatype"].device ) , batch , np.ndarray )
    out = tensor_tree_map(lambda t: np.array(t ) , make_atom14_masks(batch ) )
    return out
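# Minimal invocation sketch (an added illustration; it assumes the sibling
# residue_constants module is importable and that `aatype` holds residue-type
# indices, here five alanine-like residues):
def _demo_make_atom14_masks() -> None:
    aatype = torch.zeros(5, dtype=torch.long)
    protein = make_atom14_masks({"aatype": aatype})
    assert protein["residx_atom14_to_atom37"].shape == (5, 14)
    assert protein["atom37_atom_exists"].shape == (5, 37)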
| 344 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
# NOTE: the original class names were lost in this dump; the names below are an
# assumption based on transformers' utils/dummy_speech_objects.py.
class ASTFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['speech'])


class Speech2TextFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['speech'])
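# A self-contained analogue of the pattern above (hypothetical names, added for
# illustration): the metaclass turns class-level access into a backend check, so
# both instantiation and attribute lookup fail with an actionable message.
class _RequiresSpeechMeta(type):
    def __getattr__(cls, name):
        raise ImportError(f"{cls.__name__} requires the 'speech' backend (pip install transformers[speech]).")


class _DummySpeechExample(metaclass=_RequiresSpeechMeta):
    def __init__(self, *args, **kwargs):
        raise ImportError("This object requires the 'speech' backend.")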
| 95 | """simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v):
    '''simple docstring'''
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            F"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)." )
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    '''simple docstring'''
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *,
    aliases: Union[str, List[str]] = None, help: str = None, default: Any = dataclasses.MISSING, default_factory: Callable[[], Any] = dataclasses.MISSING, metadata: dict = None, **kwargs, ) -> dataclasses.Field:
    '''simple docstring'''
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help
    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                'Unresolved type detected, which should have been done with the help of '
                '`typing.get_type_hints` method by default')
        aliases = kwargs.pop('aliases', [])
        if isinstance(aliases, str):
            aliases = [aliases]
        origin_type = getattr(field.type, '__origin__', field.type)
        if origin_type is Union or (hasattr(types, 'UnionType') and isinstance(field.type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    'Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'
                    ' the argument parser only supports one type per argument.'
                    f" Problem encountered in field '{field.name}'.")
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, '__origin__', field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, '__origin__', field.type)
        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs['choices'] = field.type.__args__
            else:
                kwargs['choices'] = [x.value for x in field.type]
            kwargs['type'] = make_choice_type_function(kwargs['choices'])
            if field.default is not dataclasses.MISSING:
                kwargs['default'] = field.default
            else:
                kwargs['required'] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the current kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)
            # Hack because type=bool in argparse does not behave as we want.
            kwargs['type'] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs['default'] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs['nargs'] = '?'
                # This is the value that will get picked if we do --field_name (without value)
                kwargs['const'] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs['type'] = field.type.__args__[0]
            kwargs['nargs'] = '+'
            if field.default_factory is not dataclasses.MISSING:
                kwargs['default'] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs['required'] = True
        else:
            kwargs['type'] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs['default'] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs['default'] = field.default_factory()
            else:
                kwargs['required'] = True
        parser.add_argument(field_name, *aliases, **kwargs)
        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs['default'] = False
            parser.add_argument(f"--no_{field.name}", action='store_false', dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, '_argument_group_name'):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self
        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                'removing line of `from __future__ import annotations` which opts in Postponed '
                'Evaluation of Annotations (PEP 563)')
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = '.'.join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    'line of `from __future__ import annotations` which opts in union types as '
                    '`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '
                    'support Python versions lower than 3.10, you need to use '
                    '`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '
                    '`X | None`.') from ex
            raise
        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(self, args=None, return_remaining_strings=False, look_for_args_file=True, args_filename=None, args_file_flag=None, ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []
            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix('.args'))
            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action='append')
                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip('-'), None)
                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])
            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()
            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")
            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)

    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding='utf-8') as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
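# Usage sketch (an added example; `TrainArgs` is a hypothetical dataclass):
if __name__ == "__main__":
    @dataclasses.dataclass
    class TrainArgs:
        learning_rate: float = 3e-4
        do_eval: bool = False

    hf_parser = HfArgumentParser(TrainArgs)
    (train_args,) = hf_parser.parse_args_into_dataclasses(args=["--learning_rate", "1e-3", "--do_eval", "true"])
    assert train_args.learning_rate == 1e-3 and train_args.do_eval is True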
| 95 | 1 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
    '''simple docstring'''

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.', FutureWarning, )
            feature_extractor = kwargs.pop('feature_extractor')
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(
        self,
        images: Optional[ImageInput] = None,
        text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_image_mask: Optional[bool] = None,
        return_codebook_pixels: Optional[bool] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
        '''simple docstring'''
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')
        if text is not None:
            encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        if images is not None:
            image_features = self.image_processor(
                images, return_image_mask=return_image_mask, return_codebook_pixels=return_codebook_pixels, return_tensors=return_tensors, **kwargs, )
        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        '''simple docstring'''
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        '''simple docstring'''
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        '''simple docstring'''
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.', FutureWarning, )
        return self.image_processor
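# Usage sketch (an added illustration, not part of the original file; it assumes
# network access to the Hugging Face Hub and the "facebook/flava-full" checkpoint):
#
#     from PIL import Image
#     processor = FlavaProcessor.from_pretrained("facebook/flava-full")
#     inputs = processor(images=Image.open("cat.png"), text=["a photo of a cat"], return_tensors="pt")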
| 2 |
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    """simple docstring"""

    def __init__(self, short_edge_length, max_size=sys.maxsize):
        '''simple docstring'''
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        '''simple docstring'''
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)
            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # 3, 0, 1)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False).squeeze(0)
            img_augs.append(img)
        return img_augs
class Preprocess:
    """simple docstring"""

    def __init__(self, cfg):
        '''simple docstring'''
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        '''simple docstring'''
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im, [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]], value=self.pad_value, )
            for size, im in zip(image_sizes, images)
        ]
        return torch.stack(images), torch.tensor(image_sizes)
    def __call__(self, images, single_image=False):
        '''simple docstring'''
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i, torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(), )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size: Tuple[int, int]):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
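# Quick check for the box helpers above (an added example with made-up boxes;
# box_size is (height, width)):
def _demo_clip_box() -> None:
    boxes = torch.tensor([[-5.0, 10.0, 900.0, 400.0]])
    _clip_box(boxes, (480, 640))
    assert boxes.tolist() == [[0.0, 10.0, 640.0, 400.0]]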
| 149 | 0 |
'''simple docstring'''
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class RayRetriever:
    def __init__(self):
        '''simple docstring'''
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        '''simple docstring'''
        if not self.initialized:
            self.retriever = RagRetriever(
                config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, init_retrieval=False, )
            self.initialized = True

    def init_retrieval(self):
        '''simple docstring'''
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        '''simple docstring'''
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds
class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        '''simple docstring'''
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                'When using Ray for distributed fine-tuning, '
                'you\'ll need to provide the paths instead, '
                'as the dataset and the index are loaded '
                'separately. More info in examples/rag/use_own_knowledge_dataset.py ' )
        super().__init__(
            config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, init_retrieval=False, )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ] )

    def init_retrieval(self):
        '''simple docstring'''
        logger.info('initializing retrieval')
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        '''simple docstring'''
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        '''simple docstring'''
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        '''simple docstring'''
        config = kwargs.pop('config', None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = 'custom'
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, retrieval_workers=actor_handles, index=index, )
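# Wiring sketch (an added illustration; it assumes a running Ray cluster, and
# "facebook/rag-token-nq" is only an example checkpoint name):
#
#     import ray
#     ray.init()
#     workers = [ray.remote(RayRetriever).remote() for _ in range(2)]
#     retriever = RagRayDistributedRetriever.from_pretrained(
#         "facebook/rag-token-nq", actor_handles=workers
#     )
#     retriever.init_retrieval()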
| 369 |
'''simple docstring'''
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        '''simple docstring'''
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            'l',
            'o',
            'w',
            'e',
            'r',
            's',
            't',
            'i',
            'd',
            'n',
            '\u0120',
            '\u0120l',
            '\u0120n',
            '\u0120lo',
            '\u0120low',
            'er',
            '\u0120lowest',
            '\u0120newer',
            '\u0120wider',
            '<unk>',
            '<|endoftext|>',
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

    def get_tokenizer(self, **kwargs):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        '''simple docstring'''
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
    def test_full_tokenizer(self):
        '''simple docstring'''
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'lower newer'
        bpe_tokens = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_rust_and_python_full_tokenizers(self):
        '''simple docstring'''
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        sequence = 'lower newer'
        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        '''simple docstring'''
        pass
    def test_padding(self, max_length=15):
        '''simple docstring'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # Simple input
                s = 'This is a simple input'
                s2 = ['This is a simple input 1', 'This is a simple input 2']
                p = ('This is a simple input', 'This is a pair')
                p2 = [
                    ('This is a simple input 1', 'This is a simple input 2'),
                    ('This is a simple pair 1', 'This is a simple pair 2'),
                ]
                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding='max_length')
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding='max_length')
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding='max_length', )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding='max_length')
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding='max_length')
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding='max_length', )
    def test_padding_if_pad_token_set_slow(self):
        '''simple docstring'''
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token='<pad>')
        # Simple input
        s = 'This is a simple input'
        s2 = ['This is a simple input looooooooong', 'This is a simple input']
        p = ('This is a simple input', 'This is a pair')
        p2 = [
            ('This is a simple input loooooong', 'This is a simple input'),
            ('This is a simple pair loooooong', 'This is a simple pair'),
        ]
        pad_token_id = tokenizer.pad_token_id
        out_s = tokenizer(s, padding='max_length', max_length=30, return_tensors='np')
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors='np')
        out_p = tokenizer(*p, padding='max_length', max_length=60, return_tensors='np')
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors='np')
        # s
        # test single string max_length padding
        self.assertEqual(out_s['input_ids'].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s['input_ids'])
        self.assertTrue(0 in out_s['attention_mask'])
        # s2
        # test automatic padding
        self.assertEqual(out_s2['input_ids'].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2['input_ids'][0])
        self.assertFalse(0 in out_s2['attention_mask'][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2['input_ids'][1])
        self.assertTrue(0 in out_s2['attention_mask'][1])
        # p
        # test single pair max_length padding
        self.assertEqual(out_p['input_ids'].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p['input_ids'])
        self.assertTrue(0 in out_p['attention_mask'])
        # p2
        # test automatic padding pair
        self.assertEqual(out_p2['input_ids'].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2['input_ids'][0])
        self.assertFalse(0 in out_p2['attention_mask'][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2['input_ids'][1])
        self.assertTrue(0 in out_p2['attention_mask'][1])
    def test_add_bos_token_slow(self):
        '''simple docstring'''
        bos_token = '$$$'
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)
        s = 'This is a simple input'
        s2 = ['This is a simple input 1', 'This is a simple input 2']
        bos_token_id = tokenizer.bos_token_id
        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)
        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))
        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)
        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))

    @slow
    def test_truncation(self):
        '''simple docstring'''
        tokenizer = CodeGenTokenizer.from_pretrained('Salesforce/codegen-350M-mono')
        text = '\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#'
        expected_truncated_text = '\nif len_a > len_b: result = a\nelse: result = b'
        input_ids = tokenizer.encode(text)
        truncate_before_pattern = ['^#', re.escape('<|endoftext|>'), "^'''", '^"""', '\n\n\n']
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncate_before_pattern)
        self.assertEqual(decoded_text, expected_truncated_text)

    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        '''simple docstring'''
        pass
| 294 | 0 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = '''\
@misc{wu2016googles,
title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
'''
_DESCRIPTION = '''\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the \'GLEU score\'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score\'s range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
'''
_KWARGS_DESCRIPTION = '''\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
\'google_bleu\': google_bleu score
Examples:
Example 1:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.44
Example 2:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.61
Example 3:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results["google_bleu"], 2))
0.53
Example 4:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results["google_bleu"], 2))
0.4
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    '''predictions''': datasets.Sequence(datasets.Value('''string''', id='''token'''), id='''sequence'''),
                    '''references''': datasets.Sequence(
                        datasets.Sequence(datasets.Value('''string''', id='''token'''), id='''sequence'''), id='''references'''),
                } ), )

    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len )
        }
| 94 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 148 | 0 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty list for the summary."""
        raw_story = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.'
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns an empty list for both story lines and summary lines."""
        raw_story = ''
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            'It was the year of Our Lord one thousand seven hundred and '
            'seventy-five\n\nSpiritual revelations were conceded to England '
            'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            'It was the year of Our Lord one thousand seven hundred and seventy-five.',
            'Spiritual revelations were conceded to England at that favoured period, as at this.',
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ['It was the best of times.']
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
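# The helpers exercised above live in utils_summarization (not shown here). The
# sketches below are reconstructions consistent with the assertions in this test
# class, not the module's actual code; they are left commented out so they do not
# shadow the real imports:
#
# def truncate_or_pad(sequence, block_size, pad_token_id):
#     if len(sequence) > block_size:
#         return sequence[:block_size]
#     return sequence + [pad_token_id] * (block_size - len(sequence))
#
# def build_mask(sequence, pad_token_id):
#     mask = torch.ones_like(sequence)
#     mask[sequence == pad_token_id] = 0  # zero out positions holding the pad token
#     return mask
#
# def compute_token_type_ids(batch, separator_token_id):
#     token_type_ids = []
#     for sequence in batch:
#         current_type, row = 1, []
#         for token in sequence:
#             if token == separator_token_id:
#                 current_type = 1 - current_type  # flip the segment id at each separator
#             row.append(current_type)
#         token_type_ids.append(row)
#     return torch.tensor(token_type_ids)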
| 103 |
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
    # Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
    # setup instructions, if using on-demand hardware
    # If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
    # If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
    # Throw an error if user passes both BYO and on-demand cluster args
    # Otherwise, use default values
    parser = argparse.ArgumentParser()
    parser.add_argument('--user', type=str, default='ubuntu')
    parser.add_argument('--host', type=str, default='localhost')
    parser.add_argument('--key_path', type=str, default=None)
    parser.add_argument('--instance', type=str, default='V100:1')
    parser.add_argument('--provider', type=str, default='cheapest')
    parser.add_argument('--use_spot', type=bool, default=False)
    parser.add_argument('--example', type=str, default='pytorch/text-generation/run_generation.py')
    args, unknown = parser.parse_known_args()
    if args.host != "localhost":
        if args.instance != "V100:1" or args.provider != "cheapest":
            raise ValueError('Cannot specify both BYO and on-demand cluster args')
        cluster = rh.cluster(
            name='rh-cluster', ips=[args.host], ssh_creds={'ssh_user': args.user, 'ssh_private_key': args.key_path}
        )
    else:
        cluster = rh.cluster(
            name='rh-cluster', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit('/', 1)[0]

    # Set up remote environment
    cluster.install_packages(['pip:./'])  # Installs transformers from local source
    # Note transformers is copied into the home directory on the remote machine, so we can install from there
    cluster.run([f"pip install -r transformers/examples/{example_dir}/requirements.txt"])
    cluster.run(['pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'])

    # Run example. You can bypass the CLI wrapper and paste your own code here.
    cluster.run([f"""python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}"""])

    # Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
    # from my_script... import train
    # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
    # launch_train_gpu = rh.function(fn=train,
    #                                system=cluster,
    #                                reqs=reqs,
    #                                name='train_bert_glue')
    #
    # We can pass in arguments just like we would to a function:
    # launch_train_gpu(num_epochs=3, lr=2e-5, seed=42, batch_size=16,
    #                  stream_logs=True)
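    # The forwarding trick used above in isolation: parse_known_args() consumes only
    # this wrapper's own flags and returns everything else untouched, so the example
    # script receives its arguments verbatim (standalone sketch):
    #
    # import argparse, shlex
    # demo = argparse.ArgumentParser()
    # demo.add_argument('--example', type=str, default='pytorch/text-generation/run_generation.py')
    # known, unknown = demo.parse_known_args(['--example', 'x.py', '--length', '20'])
    # assert ' '.join(shlex.quote(a) for a in unknown) == '--length 20'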
| 103 | 1 |
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
__snake_case : Any = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
class SpeechTaFeatureExtractionTester(unittest.TestCase):
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        do_normalize=True,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        fmin=80,
        fmax=7600,
        mel_floor=1e-10,
        return_attention_mask=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask
    def prepare_feat_extract_dict(self):
"""simple docstring"""
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
class SpeechTaFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    '''simple docstring'''
    feature_extraction_class = SpeechTaFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = SpeechTaFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
"""simple docstring"""
__lowerCAmelCase : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
# create three inputs of length 800, 1000, and 1200
__lowerCAmelCase : Optional[int] = [floats_list((1, x))[0] for x in range(800 , 1400 , 200)]
__lowerCAmelCase : Any = [np.asarray(_SCREAMING_SNAKE_CASE) for speech_input in speech_inputs]
# Test not batched input
__lowerCAmelCase : str = feat_extract(speech_inputs[0] , return_tensors="np").input_values
__lowerCAmelCase : Dict = feat_extract(np_speech_inputs[0] , return_tensors="np").input_values
self.assertTrue(np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3))
# Test batched
__lowerCAmelCase : List[str] = feat_extract(_SCREAMING_SNAKE_CASE , return_tensors="np").input_values
__lowerCAmelCase : Optional[int] = feat_extract(_SCREAMING_SNAKE_CASE , return_tensors="np").input_values
for enc_seq_a, enc_seq_a in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE):
self.assertTrue(np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3))
    def test_zero_mean_unit_variance_normalization_np(self):
"""simple docstring"""
__lowerCAmelCase : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
__lowerCAmelCase : Any = [floats_list((1, x))[0] for x in range(800 , 1400 , 200)]
__lowerCAmelCase : str = ["longest", "max_length", "do_not_pad"]
__lowerCAmelCase : Optional[int] = [None, 1600, None]
for max_length, padding in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE):
__lowerCAmelCase : Any = feat_extract(_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , return_tensors="np")
__lowerCAmelCase : List[str] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800])
self.assertTrue(input_values[0][800:].sum() < 1e-6)
self._check_zero_mean_unit_variance(input_values[1][:1000])
self.assertTrue(input_values[0][1000:].sum() < 1e-6)
self._check_zero_mean_unit_variance(input_values[2][:1200])
    def test_zero_mean_unit_variance_normalization(self):
"""simple docstring"""
__lowerCAmelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
__lowerCAmelCase : Optional[int] = range(800 , 1400 , 200)
__lowerCAmelCase : Dict = [floats_list((1, x))[0] for x in lengths]
__lowerCAmelCase : Any = ["longest", "max_length", "do_not_pad"]
__lowerCAmelCase : Dict = [None, 1600, None]
for max_length, padding in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE):
__lowerCAmelCase : List[str] = feat_extract(_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800])
self._check_zero_mean_unit_variance(input_values[1][:1000])
self._check_zero_mean_unit_variance(input_values[2][:1200])
    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
"""simple docstring"""
__lowerCAmelCase : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
__lowerCAmelCase : Optional[Any] = [floats_list((1, x))[0] for x in range(800 , 1400 , 200)]
__lowerCAmelCase : str = feat_extract(
_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , max_length=1000 , padding="max_length" , return_tensors="np")
__lowerCAmelCase : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800])
self._check_zero_mean_unit_variance(input_values[1])
self._check_zero_mean_unit_variance(input_values[2])
    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
"""simple docstring"""
__lowerCAmelCase : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
__lowerCAmelCase : List[Any] = [floats_list((1, x))[0] for x in range(800 , 1400 , 200)]
__lowerCAmelCase : List[Any] = feat_extract(
_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , max_length=1000 , padding="longest" , return_tensors="np")
__lowerCAmelCase : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800])
self._check_zero_mean_unit_variance(input_values[1, :1000])
self._check_zero_mean_unit_variance(input_values[2])
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000))
__lowerCAmelCase : Dict = [floats_list((1, x))[0] for x in range(800 , 1400 , 200)]
__lowerCAmelCase : Optional[int] = feat_extract(
_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , max_length=2000 , padding="longest" , return_tensors="np")
__lowerCAmelCase : str = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800])
self._check_zero_mean_unit_variance(input_values[1, :1000])
self._check_zero_mean_unit_variance(input_values[2])
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200))
    def test_double_precision_pad(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    def test_call_target(self):
"""simple docstring"""
__lowerCAmelCase : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
# create three inputs of length 800, 1000, and 1200
__lowerCAmelCase : Optional[int] = [floats_list((1, x))[0] for x in range(800 , 1400 , 200)]
__lowerCAmelCase : Union[str, Any] = [np.asarray(_SCREAMING_SNAKE_CASE) for speech_input in speech_inputs]
# Test feature size
__lowerCAmelCase : List[str] = feature_extractor(audio_target=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , return_tensors="np").input_values
self.assertTrue(input_values.ndim == 3)
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins)
# Test not batched input
__lowerCAmelCase : Any = feature_extractor(speech_inputs[0] , return_tensors="np").input_values
__lowerCAmelCase : Union[str, Any] = feature_extractor(np_speech_inputs[0] , return_tensors="np").input_values
self.assertTrue(np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3))
# Test batched
__lowerCAmelCase : List[str] = feature_extractor(_SCREAMING_SNAKE_CASE , return_tensors="np").input_values
__lowerCAmelCase : List[str] = feature_extractor(_SCREAMING_SNAKE_CASE , return_tensors="np").input_values
for enc_seq_a, enc_seq_a in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE):
self.assertTrue(np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3))
# Test 2-D numpy arrays are batched.
__lowerCAmelCase : Optional[Any] = [floats_list((1, x))[0] for x in (800, 800, 800)]
__lowerCAmelCase : Optional[int] = np.asarray(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = feature_extractor(_SCREAMING_SNAKE_CASE , return_tensors="np").input_values
__lowerCAmelCase : Any = feature_extractor(_SCREAMING_SNAKE_CASE , return_tensors="np").input_values
for enc_seq_a, enc_seq_a in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE):
self.assertTrue(np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3))
    def test_batch_feature_target(self):
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_target()
__lowerCAmelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_dict)
__lowerCAmelCase : str = feat_extract.model_input_names[0]
__lowerCAmelCase : Optional[int] = BatchFeature({input_name: speech_inputs})
self.assertTrue(all(len(_SCREAMING_SNAKE_CASE) == len(_SCREAMING_SNAKE_CASE) for x, y in zip(_SCREAMING_SNAKE_CASE , processed_features[input_name])))
__lowerCAmelCase : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = BatchFeature({input_name: speech_inputs} , tensor_type="np")
__lowerCAmelCase : List[str] = processed_features[input_name]
if len(batch_features_input.shape) < 3:
__lowerCAmelCase : Optional[Any] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins))
@require_torch
    def test_batch_feature_target_pt(self):
"""simple docstring"""
__lowerCAmelCase : str = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_dict)
__lowerCAmelCase : int = feat_extract.model_input_names[0]
__lowerCAmelCase : Dict = BatchFeature({input_name: speech_inputs} , tensor_type="pt")
__lowerCAmelCase : Any = processed_features[input_name]
if len(batch_features_input.shape) < 3:
__lowerCAmelCase : Tuple = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins))
@require_torch
    def test_padding_accepts_tensors_target_pt(self):
"""simple docstring"""
__lowerCAmelCase : int = self.feature_extraction_class(**self.feat_extract_dict)
__lowerCAmelCase : Dict = self.feat_extract_tester.prepare_inputs_for_target()
__lowerCAmelCase : Optional[Any] = feat_extract.model_input_names[0]
__lowerCAmelCase : Tuple = BatchFeature({input_name: speech_inputs})
__lowerCAmelCase : Optional[int] = feat_extract.num_mel_bins # hack!
__lowerCAmelCase : Tuple = feat_extract.pad(_SCREAMING_SNAKE_CASE , padding="longest" , return_tensors="np")[input_name]
__lowerCAmelCase : List[str] = feat_extract.pad(_SCREAMING_SNAKE_CASE , padding="longest" , return_tensors="pt")[input_name]
self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_pt.numpy().astype(np.floataa).sum()) < 1e-2)
    def test_attention_mask_target(self):
"""simple docstring"""
__lowerCAmelCase : int = self.feat_extract_dict
__lowerCAmelCase : Optional[int] = True
__lowerCAmelCase : Any = self.feature_extraction_class(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[Any] = self.feat_extract_tester.prepare_inputs_for_target()
__lowerCAmelCase : Dict = [len(_SCREAMING_SNAKE_CASE) for x in speech_inputs]
__lowerCAmelCase : Optional[int] = feat_extract.model_input_names[0]
__lowerCAmelCase : List[Any] = BatchFeature({input_name: speech_inputs})
__lowerCAmelCase : Union[str, Any] = feat_extract.num_mel_bins # hack!
__lowerCAmelCase : Optional[Any] = feat_extract.pad(_SCREAMING_SNAKE_CASE , padding="longest" , return_tensors="np")
self.assertIn("attention_mask" , _SCREAMING_SNAKE_CASE)
self.assertListEqual(list(processed.attention_mask.shape) , list(processed[input_name].shape[:2]))
self.assertListEqual(processed.attention_mask.sum(-1).tolist() , _SCREAMING_SNAKE_CASE)
    def test_attention_mask_with_truncation_target(self):
"""simple docstring"""
__lowerCAmelCase : int = self.feat_extract_dict
__lowerCAmelCase : Any = True
__lowerCAmelCase : Optional[int] = self.feature_extraction_class(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_target()
__lowerCAmelCase : str = [len(_SCREAMING_SNAKE_CASE) for x in speech_inputs]
__lowerCAmelCase : str = feat_extract.model_input_names[0]
__lowerCAmelCase : Optional[int] = BatchFeature({input_name: speech_inputs})
__lowerCAmelCase : List[str] = min(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = feat_extract.num_mel_bins # hack!
__lowerCAmelCase : List[Any] = feat_extract.pad(
_SCREAMING_SNAKE_CASE , padding="max_length" , max_length=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , return_tensors="np")
self.assertIn("attention_mask" , _SCREAMING_SNAKE_CASE)
self.assertListEqual(
list(processed_pad.attention_mask.shape) , [processed_pad[input_name].shape[0], max_length])
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1).tolist() , [max_length for x in speech_inputs])
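    # The `_check_zero_mean_unit_variance` assertions used throughout this class
    # verify per-utterance normalization. A minimal sketch of that normalization,
    # ignoring attention masks (illustrative only, not the extractor's exact code):
    #
    # def zero_mean_unit_var_norm(speech_inputs):
    #     return [
    #         (x - x.mean()) / np.sqrt(x.var() + 1e-7)
    #         for x in (np.asarray(s, dtype=np.float32) for s in speech_inputs)
    #     ]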
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
             3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
             2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
             4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
             7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
             4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechTaFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 93_680))
        self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))
    def test_integration_target(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
             -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
             -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
             -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechTaFeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4)) | 269 |
"""simple docstring"""
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def read(dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset, length, type):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset, length, batch_size, type):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]
def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES, seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)
        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
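# `get_duration` is imported from the local `utils` module, which is not shown
# here. A plausible sketch of such a timing decorator (hypothetical reconstruction,
# not the actual utils implementation):
#
# import functools, timeit
#
# def get_duration(func):
#     @functools.wraps(func)
#     def wrapper(*args, **kwargs):
#         start = timeit.default_timer()
#         func(*args, **kwargs)
#         return timeit.default_timer() - start  # seconds elapsed
#     return wrapper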
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating() | 269 | 1 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, """tf_padding"""))
        self.parent.assertTrue(hasattr(config, """depth_multiplier"""))
class MobileNetVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        tf_padding=True,
        hidden_act="relu6",
        last_hidden_size=1280,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels
    def get_config(self):
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        self.parent.assertEqual(
            result.pooler_output.shape,
            (self.batch_size, self.last_hidden_size),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            """feature-extraction""": MobileNetVaModel,
            """image-classification""": MobileNetVaForImageClassification,
            """image-segmentation""": MobileNetVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV2 does not use inputs_embeds""" )
    def test_inputs_embeds(self):
pass
@unittest.skip(reason="""MobileNetV2 does not support input and output embeddings""" )
    def test_model_common_attributes(self):
pass
@unittest.skip(reason="""MobileNetV2 does not output attentions""" )
    def test_attention_outputs(self):
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 16
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Prepares a test image for the integration tests."""
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
    return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v2_1.0_224""") if is_vision_available() else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v2_1.0_224""").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="""pt""").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileNetVaForSemanticSegmentation.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""")
        model = model.to(torch_device)

        image_processor = MobileNetVaImageProcessor.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="""pt""").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1E-4))
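# A quick way to exercise the checkpoint tested above outside of the test suite
# (usage sketch; requires network access to the Hugging Face Hub and the vision
# extras installed):
#
# from transformers import pipeline
# classifier = pipeline("image-classification", model="google/mobilenet_v2_1.0_224")
# print(classifier("./tests/fixtures/tests_samples/COCO/000000039769.png")[0])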
| 87 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
UpperCamelCase__ = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    """Computes the fraction of predictions that match the labels."""
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"""help""": """Pretrained config name or path if not the same as model_name"""}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""},
    )
@dataclass
class DataTrainingArguments:
    task_name: str = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys())})
    data_dir: str = field(metadata={"""help""": """Should contain the data files for the task."""})
    max_seq_length: int = field(
        default=128,
        metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"""help""": """Overwrite the cached training and evaluation sets"""}
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
            """ --overwrite_output_dir to overcome."""
        )

    # Setup logging
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""",
        datefmt="""%m/%d/%Y %H:%M:%S""",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("""Training/evaluation parameters %s""", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("""Task not found: %s""" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(""".ckpt""" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, """eval_results.txt""")
        if trainer.is_world_master():
            with open(output_eval_file, """w""") as writer:
                logger.info("""***** Eval results *****""")
                for key, value in result.items():
                    logger.info("""  %s = %s""", key, value)
                    writer.write("""%s = %s\n""" % (key, value))

            results.update(result)

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
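# A hypothetical invocation of this script (task name, paths, and hyperparameters
# are illustrative; the task must be one registered in utils_multiple_choice):
#
# python run_multiple_choice.py \
#     --task_name swag \
#     --model_name_or_path bert-base-uncased \
#     --data_dir ./data/swag \
#     --max_seq_length 128 \
#     --output_dir ./swag_output \
#     --do_train --do_eval \
#     --overwrite_output_dir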
| 87 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCamelCase__ : Optional[Any] = {
'''configuration_squeezebert''': [
'''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SqueezeBertConfig''',
'''SqueezeBertOnnxConfig''',
],
'''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Union[str, Any] = ['''SqueezeBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Union[str, Any] = [
'''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SqueezeBertForMaskedLM''',
'''SqueezeBertForMultipleChoice''',
'''SqueezeBertForQuestionAnswering''',
'''SqueezeBertForSequenceClassification''',
'''SqueezeBertForTokenClassification''',
'''SqueezeBertModel''',
'''SqueezeBertModule''',
'''SqueezeBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
lowerCamelCase__ : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
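# With the _LazyModule above, the heavy submodules are only imported on first
# attribute access. Usage sketch (assumes transformers with the tokenizers and
# torch extras installed):
#
# from transformers import SqueezeBertConfig, SqueezeBertModel
# model = SqueezeBertModel(SqueezeBertConfig())  # triggers the lazy torch import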
| 246 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def rename_keys(state_dict):
    """Renames the original checkpoint keys to the Transformers GLPN naming scheme."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("""module.encoder"""):
            key = key.replace("""module.encoder""", """glpn.encoder""")
        if key.startswith("""module.decoder"""):
            key = key.replace("""module.decoder""", """decoder.stages""")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("""patch_embed""") + len("""patch_embed""")]
            key = key.replace(F'patch_embed{idx}', F'patch_embeddings.{int(idx)-1}')
        if "norm" in key:
            key = key.replace("""norm""", """layer_norm""")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("""glpn.encoder.layer_norm""") + len("""glpn.encoder.layer_norm""")]
            key = key.replace(F'layer_norm{idx}', F'layer_norm.{int(idx)-1}')
        if "layer_norm1" in key:
            key = key.replace("""layer_norm1""", """layer_norm_1""")
        if "layer_norm2" in key:
            key = key.replace("""layer_norm2""", """layer_norm_2""")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("""block""") + len("""block""")]
            key = key.replace(F'block{idx}', F'block.{int(idx)-1}')
        if "attn.q" in key:
            key = key.replace("""attn.q""", """attention.self.query""")
        if "attn.proj" in key:
            key = key.replace("""attn.proj""", """attention.output.dense""")
        if "attn" in key:
            key = key.replace("""attn""", """attention.self""")
        if "fc1" in key:
            key = key.replace("""fc1""", """dense1""")
        if "fc2" in key:
            key = key.replace("""fc2""", """dense2""")
        if "linear_pred" in key:
            key = key.replace("""linear_pred""", """classifier""")
        if "linear_fuse" in key:
            key = key.replace("""linear_fuse.conv""", """linear_fuse""")
            key = key.replace("""linear_fuse.bn""", """batch_norm""")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("""linear_c""") + len("""linear_c""")]
            key = key.replace(F'linear_c{idx}', F'linear_c.{int(idx)-1}')
        if "bot_conv" in key:
            key = key.replace("""bot_conv""", """0.convolution""")
        if "skip_conv1" in key:
            key = key.replace("""skip_conv1""", """1.convolution""")
        if "skip_conv2" in key:
            key = key.replace("""skip_conv2""", """2.convolution""")
        if "fusion1" in key:
            key = key.replace("""fusion1""", """1.fusion""")
        if "fusion2" in key:
            key = key.replace("""fusion2""", """2.fusion""")
        if "fusion3" in key:
            key = key.replace("""fusion3""", """3.fusion""")
        if "fusion" in key and "conv" in key:
            key = key.replace("""conv""", """convolutional_layer""")
        if key.startswith("""module.last_layer_depth"""):
            key = key.replace("""module.last_layer_depth""", """head.head""")
        new_state_dict[key] = value

    return new_state_dict
def read_in_k_v(config, state_dict):
    """Splits each fused key/value matrix of the original checkpoint into separate key and value weights."""
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(F'glpn.encoder.block.{i}.{j}.attention.self.kv.weight')
            kv_bias = state_dict.pop(F'glpn.encoder.block.{i}.{j}.attention.self.kv.bias')
            # next, add keys and values (in that order) to the state dict
            state_dict[F'glpn.encoder.block.{i}.{j}.attention.self.key.weight'] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[F'glpn.encoder.block.{i}.{j}.attention.self.key.bias'] = kv_bias[: config.hidden_sizes[i]]
            state_dict[F'glpn.encoder.block.{i}.{j}.attention.self.value.weight'] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[F'glpn.encoder.block.{i}.{j}.attention.self.value.bias'] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    """Prepares a test image to verify the converted model on."""
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    """Copies/pastes/tweaks the original GLPN checkpoint into the Transformers GLPN format."""
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="""pt""").pixel_values

    logger.info("""Converting model...""")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("""cpu"""))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(config, state_dict)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(F'Unknown model name: {model_name}')

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("""Looks ok!""")

    # finally, push to hub if required
    if push_to_hub:
        logger.info("""Pushing model and image processor to the hub...""")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="""nielsr""",
            commit_message="""Add model""",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="""nielsr""",
            commit_message="""Add image processor""",
            use_temp_dir=True,
        )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path",
default=None,
type=str,
help="Path to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
parser.add_argument(
"--model_name",
default="glpn-kitti",
type=str,
help="Name of the model in case you're pushing to the hub.",
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
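    # After conversion, the dumped model behaves like any Transformers depth
    # estimator (usage sketch; the local folder path is illustrative):
    #
    # from transformers import GLPNForDepthEstimation, GLPNImageProcessor
    # model = GLPNForDepthEstimation.from_pretrained("./glpn-kitti-converted")
    # processor = GLPNImageProcessor()
    # depth = model(processor(images=prepare_img(), return_tensors="pt").pixel_values).predicted_depth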
| 46 | 0 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
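# Usage sketch for the guarded imports above (assumes torch and a recent enough
# transformers are installed; the checkpoint name is the public Versatile
# Diffusion release):
#
# from diffusers import VersatileDiffusionPipeline
# pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion")
# image = pipe.text_to_image("a red panda reading a book").images[0]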
| 57 |
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
'return_dict': False,
'output_hidden_states': True,
'output_attentions': True,
'torchscript': True,
'torch_dtype': 'float16',
'use_bfloat16': True,
'tf_legacy_loss': True,
'pruned_heads': {'a': 1},
'tie_word_embeddings': False,
'is_decoder': True,
'cross_attention_hidden_size': 1_28,
'add_cross_attention': True,
'tie_encoder_decoder': True,
'max_length': 50,
'min_length': 3,
'do_sample': True,
'early_stopping': True,
'num_beams': 3,
'num_beam_groups': 3,
'diversity_penalty': 0.5,
'temperature': 2.0,
'top_k': 10,
'top_p': 0.7,
'typical_p': 0.2,
'repetition_penalty': 0.8,
'length_penalty': 0.8,
'no_repeat_ngram_size': 5,
'encoder_no_repeat_ngram_size': 5,
'bad_words_ids': [1, 2, 3],
'num_return_sequences': 3,
'chunk_size_feed_forward': 5,
'output_scores': True,
'return_dict_in_generate': True,
'forced_bos_token_id': 2,
'forced_eos_token_id': 3,
'remove_invalid_values': True,
'architectures': ['BertModel'],
'finetuning_task': 'translation',
'id2label': {0: 'label'},
'label2id': {'label': '0'},
'tokenizer_class': 'BertTokenizerFast',
'prefix': 'prefix',
'bos_token_id': 6,
'pad_token_id': 7,
'eos_token_id': 8,
'sep_token_id': 9,
'decoder_start_token_id': 10,
'exponential_decay_length_penalty': (5, 1.01),
'suppress_tokens': [0, 1],
'begin_suppress_tokens': 2,
'task_specific_params': {'translation': 'some_params'},
'problem_type': 'regression',
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
@classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
@classmethod
    def tearDownClass(cls):
try:
delete_repo(token=cls._token , repo_id="test-config")
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-config-org")
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-config")
except HTTPError:
pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("test-config", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(F"""{USER}/test-config""")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(F"""{USER}/test-config""")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
def SCREAMING_SNAKE_CASE__ (self : int):
A = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7)
config.push_to_hub("valid_org/test-config-org" , use_auth_token=self._token)
A = BertConfig.from_pretrained("valid_org/test-config-org")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__SCREAMING_SNAKE_CASE , getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE))
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-config-org")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__SCREAMING_SNAKE_CASE , repo_id="valid_org/test-config-org" , push_to_hub=__SCREAMING_SNAKE_CASE , use_auth_token=self._token)
A = BertConfig.from_pretrained("valid_org/test-config-org")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__SCREAMING_SNAKE_CASE , getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE))
def SCREAMING_SNAKE_CASE__ (self : Union[str, Any]):
CustomConfig.register_for_auto_class()
A = CustomConfig(attribute=4_2)
config.push_to_hub("test-dynamic-config" , use_auth_token=self._token)
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {"AutoConfig": "custom_configuration.CustomConfig"})
A = AutoConfig.from_pretrained(F"""{USER}/test-dynamic-config""" , trust_remote_code=__SCREAMING_SNAKE_CASE)
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , "CustomConfig")
self.assertEqual(new_config.attribute , 4_2)
class __UpperCamelCase ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ (self : Optional[Any]):
A = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
A = c.n_embd + 1 # int
A = c.resid_pdrop + 1.0 # float
A = not c.scale_attn_weights # bool
A = c.summary_type + "foo" # str
c.update_from_string(
F"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""")
self.assertEqual(__SCREAMING_SNAKE_CASE , c.n_embd , "mismatch for key: n_embd")
self.assertEqual(__SCREAMING_SNAKE_CASE , c.resid_pdrop , "mismatch for key: resid_pdrop")
self.assertEqual(__SCREAMING_SNAKE_CASE , c.scale_attn_weights , "mismatch for key: scale_attn_weights")
self.assertEqual(__SCREAMING_SNAKE_CASE , c.summary_type , "mismatch for key: summary_type")
def SCREAMING_SNAKE_CASE__ (self : Dict):
A = PretrainedConfig()
A = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
__SCREAMING_SNAKE_CASE , ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"])
A = [key for key, value in config_common_kwargs.items() if value == getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)]
if len(__SCREAMING_SNAKE_CASE) > 0:
raise ValueError(
"The following keys are set with the default values in"
" `test_configuration_common.config_common_kwargs` pick another value for them:"
F""" {", ".join(__SCREAMING_SNAKE_CASE)}.""")
def SCREAMING_SNAKE_CASE__ (self : Dict):
with self.assertRaises(__SCREAMING_SNAKE_CASE):
# config is in subfolder, the following should not work without specifying the subfolder
A = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")
A = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" , subfolder="bert")
self.assertIsNotNone(__SCREAMING_SNAKE_CASE)
def SCREAMING_SNAKE_CASE__ (self : int):
# A mock response for an HTTP head request to emulate server down
A = mock.Mock()
A = 5_0_0
A = {}
A = HTTPError
A = {}
# Download this model to make sure it's in the cache.
A = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=__SCREAMING_SNAKE_CASE) as mock_head:
A = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check ensures we did call the fake head request
mock_head.assert_called()
def SCREAMING_SNAKE_CASE__ (self : Optional[int]):
# This test is for deprecated behavior and can be removed in v5
A = BertConfig.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json")
def SCREAMING_SNAKE_CASE__ (self : Union[str, Any]):
A = AutoConfig.from_pretrained("bert-base-cased")
A = ["config.4.0.0.json"]
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(__SCREAMING_SNAKE_CASE)
A = 2
json.dump(configuration.to_dict() , open(os.path.join(__SCREAMING_SNAKE_CASE , "config.4.0.0.json") , "w"))
# This should pick the new configuration file as the version of Transformers is > 4.0.0
A = AutoConfig.from_pretrained(__SCREAMING_SNAKE_CASE)
self.assertEqual(new_configuration.hidden_size , 2)
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
A = ["config.42.0.0.json"]
A = 7_6_8
configuration.save_pretrained(__SCREAMING_SNAKE_CASE)
shutil.move(os.path.join(__SCREAMING_SNAKE_CASE , "config.4.0.0.json") , os.path.join(__SCREAMING_SNAKE_CASE , "config.42.0.0.json"))
A = AutoConfig.from_pretrained(__SCREAMING_SNAKE_CASE)
self.assertEqual(new_configuration.hidden_size , 7_6_8)
def SCREAMING_SNAKE_CASE__ (self : Optional[int]):
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
A = "hf-internal-testing/test-two-configs"
import transformers as new_transformers
A = "v4.0.0"
A , A = new_transformers.models.auto.AutoConfig.from_pretrained(
__SCREAMING_SNAKE_CASE , return_unused_kwargs=__SCREAMING_SNAKE_CASE)
self.assertEqual(new_configuration.hidden_size , 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(__SCREAMING_SNAKE_CASE , {})
        # Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
A = "v3.0.0"
A = old_transformers.models.auto.AutoConfig.from_pretrained(__SCREAMING_SNAKE_CASE)
self.assertEqual(old_configuration.hidden_size , 7_6_8)
| 57 | 1 |
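A hedged usage sketch of the `update_from_string` round-trip the test above exercises, assuming a working transformers install; the values are arbitrary non-defaults.

from transformers import GPT2Config

config = GPT2Config()
config.update_from_string("n_embd=769,resid_pdrop=0.2,scale_attn_weights=false")
assert config.n_embd == 769                 # int parsed from the string
assert config.resid_pdrop == 0.2            # float parsed from the string
assert config.scale_attn_weights is False   # "false" coerced to bool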
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase_ = {
'configuration_megatron_bert': ['MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegatronBertConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = [
'MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegatronBertForCausalLM',
'MegatronBertForMaskedLM',
'MegatronBertForMultipleChoice',
'MegatronBertForNextSentencePrediction',
'MegatronBertForPreTraining',
'MegatronBertForQuestionAnswering',
'MegatronBertForSequenceClassification',
'MegatronBertForTokenClassification',
'MegatronBertModel',
'MegatronBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
lowerCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 191 |
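A minimal sketch of the deferred-import idea behind `_LazyModule`, written against the standard library only; the class below is a simplified stand-in, not the transformers implementation.

import importlib
import types


class LazyModule(types.ModuleType):
    """Resolve attributes on first access instead of importing everything up front."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # attribute name -> module that actually defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(module_name)  # the deferred import happens here
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups bypass __getattr__
        return value


lazy = LazyModule("lazy_stdlib", {"json": ["dumps"], "math": ["sqrt"]})
print(lazy.sqrt(2.0))  # math is only imported at this point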
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
UpperCAmelCase_ : List[Any] = 'https://api.github.com'
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
UpperCAmelCase_ : Optional[Any] = BASE_URL + '/user'
# https://github.com/settings/tokens
UpperCAmelCase_ : str = os.environ.get('USER_TOKEN', '')
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = {
"""Authorization""": f"""token {auth_token}""",
"""Accept""": """application/vnd.github.v3+json""",
}
return requests.get(SCREAMING_SNAKE_CASE__ , headers=SCREAMING_SNAKE_CASE__ ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F"{key}: {value}")
else:
raise ValueError('\'USER_TOKEN\' field cannot be empty.')
| 200 | 0 |
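The same authenticated-request pattern with descriptive names, as a hedged usage sketch; the token comes from the environment and nothing is sent without one.

import os

import requests


def github_user(auth_token: str) -> dict:
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    response = requests.get("https://api.github.com/user", headers=headers, timeout=10)
    response.raise_for_status()  # surface 401s instead of returning an error body
    return response.json()


if __name__ == "__main__":
    token = os.environ.get("USER_TOKEN", "")
    if token:
        print(github_user(token))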
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class lowerCamelCase__ ( lowerCamelCase__):
'''simple docstring'''
snake_case_ =CustomTokenizer
pass
| 363 |
import string
import numpy
def lowerCAmelCase__ ( lowerCamelCase_ : int ,lowerCamelCase_ : int):
'''simple docstring'''
return b if a == 0 else greatest_common_divisor(b % a ,lowerCamelCase_)
class lowerCamelCase__ :
'''simple docstring'''
snake_case_ =string.ascii_uppercase + string.digits
# This cipher takes alphanumerics into account
# i.e. a total of 36 characters
# take x and return x % len(key_string)
snake_case_ =numpy.vectorize(lambda lowerCamelCase__: x % 36)
snake_case_ =numpy.vectorize(lowerCamelCase__)
def __init__(self ,__lowerCamelCase ) -> None:
"""simple docstring"""
lowerCAmelCase__ : Tuple = self.modulus(__lowerCamelCase ) # mod36 calc's on the encrypt key
self.check_determinant() # validate the determinant of the encryption key
lowerCAmelCase__ : Optional[int] = encrypt_key.shape[0]
def lowerCAmelCase__ (self ,__lowerCamelCase ) -> int:
"""simple docstring"""
return self.key_string.index(__lowerCamelCase )
def lowerCAmelCase__ (self ,__lowerCamelCase ) -> str:
"""simple docstring"""
return self.key_string[round(__lowerCamelCase )]
def lowerCAmelCase__ (self ) -> None:
"""simple docstring"""
lowerCAmelCase__ : Tuple = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
lowerCAmelCase__ : str = det % len(self.key_string )
lowerCAmelCase__ : Optional[Any] = len(self.key_string )
if greatest_common_divisor(__lowerCamelCase ,len(self.key_string ) ) != 1:
lowerCAmelCase__ : List[str] = (
f"""determinant modular {req_l} of encryption key({det}) """
f"""is not co prime w.r.t {req_l}.\nTry another key."""
)
raise ValueError(__lowerCamelCase )
def lowerCAmelCase__ (self ,__lowerCamelCase ) -> str:
"""simple docstring"""
lowerCAmelCase__ : Dict = [char for char in text.upper() if char in self.key_string]
lowerCAmelCase__ : int = chars[-1]
while len(__lowerCamelCase ) % self.break_key != 0:
chars.append(__lowerCamelCase )
return "".join(__lowerCamelCase )
def lowerCAmelCase__ (self ,__lowerCamelCase ) -> str:
"""simple docstring"""
lowerCAmelCase__ : List[str] = self.process_text(text.upper() )
lowerCAmelCase__ : str = ''''''
for i in range(0 ,len(__lowerCamelCase ) - self.break_key + 1 ,self.break_key ):
lowerCAmelCase__ : Any = text[i : i + self.break_key]
lowerCAmelCase__ : Dict = [self.replace_letters(__lowerCamelCase ) for char in batch]
lowerCAmelCase__ : int = numpy.array([vec] ).T
lowerCAmelCase__ : Union[str, Any] = self.modulus(self.encrypt_key.dot(__lowerCamelCase ) ).T.tolist()[
0
]
lowerCAmelCase__ : Union[str, Any] = ''''''.join(
self.replace_digits(__lowerCamelCase ) for num in batch_encrypted )
encrypted += encrypted_batch
return encrypted
def lowerCAmelCase__ (self ) -> numpy.ndarray:
"""simple docstring"""
lowerCAmelCase__ : Tuple = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
lowerCAmelCase__ : int = det % len(self.key_string )
lowerCAmelCase__ : Dict = None
for i in range(len(self.key_string ) ):
if (det * i) % len(self.key_string ) == 1:
lowerCAmelCase__ : Optional[Any] = i
break
lowerCAmelCase__ : Optional[Any] = (
det_inv
* numpy.linalg.det(self.encrypt_key )
* numpy.linalg.inv(self.encrypt_key )
)
return self.to_int(self.modulus(__lowerCamelCase ) )
def lowerCAmelCase__ (self ,__lowerCamelCase ) -> str:
"""simple docstring"""
lowerCAmelCase__ : str = self.make_decrypt_key()
lowerCAmelCase__ : List[str] = self.process_text(text.upper() )
lowerCAmelCase__ : Optional[Any] = ''''''
for i in range(0 ,len(__lowerCamelCase ) - self.break_key + 1 ,self.break_key ):
lowerCAmelCase__ : List[Any] = text[i : i + self.break_key]
lowerCAmelCase__ : Tuple = [self.replace_letters(__lowerCamelCase ) for char in batch]
lowerCAmelCase__ : Optional[Any] = numpy.array([vec] ).T
lowerCAmelCase__ : Tuple = self.modulus(decrypt_key.dot(__lowerCamelCase ) ).T.tolist()[0]
lowerCAmelCase__ : Optional[int] = ''''''.join(
self.replace_digits(__lowerCamelCase ) for num in batch_decrypted )
decrypted += decrypted_batch
return decrypted
def lowerCAmelCase__ ( ):
'''simple docstring'''
lowerCAmelCase__ : Any = int(input('''Enter the order of the encryption key: '''))
lowerCAmelCase__ : Union[str, Any] = []
print('''Enter each row of the encryption key with space separated integers''')
for _ in range(lowerCamelCase_):
lowerCAmelCase__ : int = [int(lowerCamelCase_) for x in input().split()]
hill_matrix.append(lowerCamelCase_)
lowerCAmelCase__ : List[str] = HillCipher(numpy.array(lowerCamelCase_))
print('''Would you like to encrypt or decrypt some text? (1 or 2)''')
lowerCAmelCase__ : List[Any] = input('''\n1. Encrypt\n2. Decrypt\n''')
if option == "1":
lowerCAmelCase__ : Optional[int] = input('''What text would you like to encrypt?: ''')
print('''Your encrypted text is:''')
print(hc.encrypt(lowerCamelCase_))
elif option == "2":
lowerCAmelCase__ : Dict = input('''What text would you like to decrypt?: ''')
print('''Your decrypted text is:''')
print(hc.decrypt(lowerCamelCase_))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 94 | 0 |
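A small standalone check of the Hill-cipher arithmetic the class above implements, encrypting one 2-character block over the same 36-symbol alphabet; the 2x2 key is an arbitrary example whose determinant (7) is coprime with 36.

import string

import numpy as np

KEY_STRING = string.ascii_uppercase + string.digits  # the same 36 symbols

key = np.array([[2, 5], [1, 6]])                       # det = 7, invertible mod 36
vec = np.array([[KEY_STRING.index(c)] for c in "HI"])  # column vector of indices
encrypted = (key @ vec) % 36
print("".join(KEY_STRING[int(v)] for v in encrypted.flatten()))  # "ST"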
'''simple docstring'''
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('Googling.....')
lowerCAmelCase: List[str] = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
lowerCAmelCase: Union[str, Any] = requests.get(url, headers={'UserAgent': UserAgent().random})
# res.raise_for_status()
    with open('project1a.html', 'wb') as out_file:  # saved only to inspect the result's CSS classes
for data in res.iter_content(1_0_0_0_0):
out_file.write(data)
lowerCAmelCase: Union[str, Any] = BeautifulSoup(res.text, 'html.parser')
lowerCAmelCase: Optional[int] = list(soup.select('.eZt8xd'))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('href'))
else:
webbrowser.open(F"https://google.com{link.get('href')}") | 297 |
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
lowerCAmelCase: List[str] = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class a__( lowerCamelCase__ ):
def __init__( self : Any , __snake_case : List[str] , __snake_case : Union[str, Any] , __snake_case : List[Any]=None , __snake_case : Optional[Any]=1 ):
a : Union[str, Any] = tokenizer
a : Union[str, Any] = dataset
a : Any = len(__snake_case ) if n_tasks is None else n_tasks
a : List[str] = n_copies
def __iter__( self : str ):
a : List[Any] = []
for task in range(self.n_tasks ):
            # without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
a : Dict = self.tokenizer(__snake_case , padding=__snake_case , return_tensors='pt' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class a__( lowerCamelCase__ ):
def __init__( self : Union[str, Any] , __snake_case : int , __snake_case : Optional[Any] , __snake_case : str ):
a : Dict = start_length
a : Dict = eof_strings
a : str = tokenizer
def __call__( self : Optional[Any] , __snake_case : Optional[Any] , __snake_case : Any , **__snake_case : Union[str, Any] ):
a : int = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
a : Optional[int] = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(__snake_case )
def lowerCamelCase__ ( _A ):
a : Optional[Any] = re.split('(%s)' % '|'.join(_A ) , _A )
# last string should be ""
return "".join(string_list[:-2] )
def lowerCamelCase__ ( _A , _A , _A , _A , _A , _A=20 , **_A ):
a : Optional[Any] = defaultdict(_A ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_A ) ):
with torch.no_grad():
a : Optional[Any] = batch['ids'].shape[-1]
a : Optional[Any] = accelerator.unwrap_model(_A ).generate(
input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=_A , **_A )
# each task is generated batch_size times
a : Tuple = batch['task_id'].repeat(_A )
a : List[Any] = accelerator.pad_across_processes(
_A , dim=1 , pad_index=tokenizer.pad_token_id )
a , a : Union[str, Any] = accelerator.gather((generated_tokens, generated_tasks) )
a : List[str] = generated_tokens.cpu().numpy()
a : int = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_A , _A ):
gen_token_dict[task].append(_A )
a : Any = [[] for _ in range(_A )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
a : Optional[int] = tokenizer.decode(_A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A )
code_gens[task].append(remove_last_block(_A ) )
return code_gens
def lowerCamelCase__ ( ):
# Setup configuration
a : Dict = HfArgumentParser(_A )
a : Any = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
a : List[Any] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
a : int = 'false'
if args.num_workers is None:
a : Dict = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
a : List[Any] = Accelerator()
set_seed(args.seed , device_specific=_A )
# Load model and tokenizer
a : Dict = AutoTokenizer.from_pretrained(args.model_ckpt )
a : str = tokenizer.eos_token
a : List[Any] = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
a : Optional[Any] = {
'do_sample': args.do_sample,
'temperature': args.temperature,
'max_new_tokens': args.max_new_tokens,
'top_p': args.top_p,
'top_k': args.top_k,
'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , _A , _A )] ),
}
# Load evaluation dataset and metric
a : Optional[int] = load_dataset('openai_humaneval' )
a : Optional[Any] = load_metric('code_eval' )
a : Optional[int] = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
a : Optional[Any] = args.n_samples // args.batch_size
a : Any = TokenizedDataset(_A , human_eval['test'] , n_copies=_A , n_tasks=_A )
    # note: args.batch_size is actually num_return_sequences, not the dataloader batch size
a : int = DataLoader(_A , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
a : int = code_eval_metric.compute(references=[''] , predictions=[['']] )
except ValueError as exception:
print(
'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
' flag to enable code evaluation.' )
raise exception
a , a : int = accelerator.prepare(_A , _A )
a : int = complete_code(
_A , _A , _A , _A , n_tasks=_A , batch_size=args.batch_size , **_A , )
if accelerator.is_main_process:
a : List[str] = []
for task in tqdm(range(_A ) ):
a : int = human_eval['test'][task]['test']
a : int = f"""check({human_eval["test"][task]["entry_point"]})"""
references.append('\n' + test_func + '\n' + entry_point )
# Evaluate completions with "code_eval" metric
a , a : Tuple = code_eval_metric.compute(
references=_A , predictions=_A , num_workers=args.num_workers )
print(f"""Results: {pass_at_k}""" )
# Save results to json file
with open(args.output_file , 'w' ) as fp:
json.dump(_A , _A )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main() | 297 | 1 |
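A quick standalone check of the stop-string truncation used above: re.split with a capturing group keeps the separators, so dropping the last two entries removes the first unfinished block after the generated body.

import re

EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]


def remove_last_block(string):
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    return "".join(string_list[:-2])


completion = "    return x + 1\n\ndef unwanted():\n    pass"
print(repr(remove_last_block(completion)))  # '    return x + 1\n'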
"""simple docstring"""
def _lowerCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase = []
UpperCAmelCase = 1
while len(lowerCAmelCase ) < 1e6:
constant.append(str(lowerCAmelCase ) )
i += 1
UpperCAmelCase = """""".join(lowerCAmelCase )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[99999] )
* int(constant[999999] )
)
if __name__ == "__main__":
print(solution())
| 368 |
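A short sanity check of the digit indexing above: the constant starts 123456789101112..., so index 9 (the 10th digit) is the "1" that opens "10".

digits = "".join(str(i) for i in range(1, 100))
assert digits[0] == "1"
assert digits[9] == "1"   # 10th digit, start of "10"
assert digits[11] == "1"  # 12th digit, start of "11"
print(digits[:15])        # 123456789101112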
"""simple docstring"""
lowerCAmelCase_ : Dict = {str(digit): digit**5 for digit in range(1_0)}
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(lowerCAmelCase ) )
def _lowerCAmelCase ( ):
'''simple docstring'''
return sum(
number
for number in range(1000 , 1000000 )
if number == digits_fifth_powers_sum(lowerCAmelCase ) )
if __name__ == "__main__":
print(solution())
| 248 | 0 |
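A standalone spot check of the digit-power sum above: 4150 and 4151 are among the numbers equal to the sum of the fifth powers of their digits.

def digit_power_sum(number: int, power: int = 5) -> int:
    return sum(int(digit) ** power for digit in str(number))


assert digit_power_sum(4150) == 4150  # 4**5 + 1**5 + 5**5 + 0**5
assert digit_power_sum(4151) == 4151
print(digit_power_sum(4150))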
from typing import Any
class a_ :
"""simple docstring"""
def __init__( self : int ,snake_case : Optional[int] ):
SCREAMING_SNAKE_CASE =data
SCREAMING_SNAKE_CASE =None
def __repr__( self : Tuple ):
return f'Node({self.data})'
class a_ :
"""simple docstring"""
def __init__( self : List[Any] ):
SCREAMING_SNAKE_CASE =None
def __iter__( self : Tuple ):
SCREAMING_SNAKE_CASE =self.head
while node:
yield node.data
SCREAMING_SNAKE_CASE =node.next
def __len__( self : List[Any] ):
return sum(1 for _ in self )
def __repr__( self : Dict ):
return "->".join([str(__UpperCamelCase ) for item in self] )
def __getitem__( self : Tuple ,snake_case : List[Any] ):
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self : List[str] ,snake_case : str ,snake_case : Optional[int] ):
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
SCREAMING_SNAKE_CASE =self.head
for _ in range(__UpperCamelCase ):
SCREAMING_SNAKE_CASE =current.next
SCREAMING_SNAKE_CASE =data
def _lowerCAmelCase ( self : List[str] ,snake_case : Optional[Any] ):
self.insert_nth(len(self ) ,__UpperCamelCase )
def _lowerCAmelCase ( self : List[Any] ,snake_case : Any ):
self.insert_nth(0 ,__UpperCamelCase )
def _lowerCAmelCase ( self : List[str] ,snake_case : List[Any] ,snake_case : Optional[int] ):
if not 0 <= index <= len(self ):
raise IndexError('list index out of range' )
SCREAMING_SNAKE_CASE =Node(__UpperCamelCase )
if self.head is None:
SCREAMING_SNAKE_CASE =new_node
elif index == 0:
SCREAMING_SNAKE_CASE =self.head # link new_node to head
SCREAMING_SNAKE_CASE =new_node
else:
SCREAMING_SNAKE_CASE =self.head
for _ in range(index - 1 ):
SCREAMING_SNAKE_CASE =temp.next
SCREAMING_SNAKE_CASE =temp.next
SCREAMING_SNAKE_CASE =new_node
def _lowerCAmelCase ( self : List[Any] ): # print every node data
print(self )
def _lowerCAmelCase ( self : Optional[Any] ):
return self.delete_nth(0 )
def _lowerCAmelCase ( self : List[Any] ): # delete from tail
return self.delete_nth(len(self ) - 1 )
def _lowerCAmelCase ( self : int ,snake_case : str = 0 ):
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError('List index out of range.' )
SCREAMING_SNAKE_CASE =self.head # default first node
if index == 0:
SCREAMING_SNAKE_CASE =self.head.next
else:
SCREAMING_SNAKE_CASE =self.head
for _ in range(index - 1 ):
SCREAMING_SNAKE_CASE =temp.next
SCREAMING_SNAKE_CASE =temp.next
SCREAMING_SNAKE_CASE =temp.next.next
return delete_node.data
def _lowerCAmelCase ( self : Tuple ):
return self.head is None
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE =None
SCREAMING_SNAKE_CASE =self.head
while current:
# Store the current node's next node.
SCREAMING_SNAKE_CASE =current.next
# Make the current node's next point backwards
SCREAMING_SNAKE_CASE =prev
# Make the previous node be the current node
SCREAMING_SNAKE_CASE =current
# Make the current node the next node (to progress iteration)
SCREAMING_SNAKE_CASE =next_node
# Return prev in order to put the head at the end
SCREAMING_SNAKE_CASE =prev
def snake_case__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =LinkedList()
assert linked_list.is_empty() is True
assert str(A__ ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(A__ ) == i
linked_list.insert_nth(A__, i + 1 )
assert str(A__ ) == "->".join(str(A__ ) for i in range(1, 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(A__ ) == "->".join(str(A__ ) for i in range(0, 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(A__ ) == 9
assert str(A__ ) == "->".join(str(A__ ) for i in range(1, 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0, 9 ) ) is True
for i in range(0, 9 ):
SCREAMING_SNAKE_CASE =-i
assert all(linked_list[i] == -i for i in range(0, 9 ) ) is True
linked_list.reverse()
assert str(A__ ) == "->".join(str(A__ ) for i in range(-8, 1 ) )
def snake_case__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =[
-9,
100,
Node(77345112 ),
'dlrow olleH',
7,
5555,
0,
-192.55555,
'Hello, world!',
77.9,
Node(10 ),
None,
None,
12.20,
]
SCREAMING_SNAKE_CASE =LinkedList()
for i in test_input:
linked_list.insert_tail(A__ )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(A__ ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
SCREAMING_SNAKE_CASE =linked_list.delete_head()
assert result == -9
assert (
str(A__ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
SCREAMING_SNAKE_CASE =linked_list.delete_tail()
assert result == 12.2
assert (
str(A__ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
SCREAMING_SNAKE_CASE =linked_list.delete_nth(10 )
assert result is None
assert (
str(A__ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node('Hello again, world!' ) )
assert (
str(A__ )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(A__ )
assert (
str(A__ )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(A__ )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def snake_case__ ( ):
"""simple docstring"""
from doctest import testmod
testmod()
SCREAMING_SNAKE_CASE =LinkedList()
linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
print('\nDelete head' )
linked_list.delete_head()
print('Delete tail' )
linked_list.delete_tail()
print('\nPrint list:' )
linked_list.print_list()
print('\nReverse linked list' )
linked_list.reverse()
print('\nPrint list:' )
linked_list.print_list()
print('\nString representation of linked list:' )
print(A__ )
print('\nReading/changing Node data using indexing:' )
print(F'Element at Position 1: {linked_list[1]}' )
SCREAMING_SNAKE_CASE =input('Enter New Value: ' ).strip()
print('New list:' )
print(A__ )
print(F'length of linked_list is : {len(A__ )}' )
if __name__ == "__main__":
main()
| 334 | import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
lowerCAmelCase__ : Union[str, Any] = '''http://www.mocksite.com/file1.txt'''
lowerCAmelCase__ : Optional[Any] = '''"text": ["foo", "foo"]'''
lowerCAmelCase__ : List[str] = '''6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8'''
class __snake_case :
__lowerCamelCase = 200
__lowerCamelCase = {"""Content-Length""": """100"""}
__lowerCamelCase = {}
def __a ( self , **__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
return [bytes(__UpperCamelCase , 'utf-8' )]
def UpperCamelCase__ ( *A__ , **A__ ) -> Optional[Any]:
return MockResponse()
@pytest.mark.parametrize('urls_type' , [str, list, dict] )
def UpperCamelCase__ ( A__ , A__ , A__ ) -> Any:
import requests
monkeypatch.setattr(A__ , 'request' , A__ )
snake_case__ : Any = URL
if issubclass(A__ , A__ ):
snake_case__ : Optional[Any] = url
elif issubclass(A__ , A__ ):
snake_case__ : Dict = [url]
elif issubclass(A__ , A__ ):
snake_case__ : Any = {'train': url}
snake_case__ : Union[str, Any] = 'dummy'
snake_case__ : List[str] = 'downloads'
snake_case__ : int = tmp_path
snake_case__ : Tuple = DownloadConfig(
cache_dir=os.path.join(A__ , A__ ) , use_etag=A__ , )
snake_case__ : Any = DownloadManager(dataset_name=A__ , download_config=A__ )
snake_case__ : Any = dl_manager.download(A__ )
snake_case__ : Dict = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(A__ , A__ ):
snake_case__ : int = [downloaded_paths]
snake_case__ : Any = [urls]
elif isinstance(A__ , A__ ):
assert "train" in downloaded_paths.keys()
snake_case__ : Union[str, Any] = downloaded_paths.values()
snake_case__ : Any = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(A__ , A__ ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
snake_case__ : int = Path(A__ )
snake_case__ : Optional[int] = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
snake_case__ : Optional[Any] = downloaded_path.read_text()
assert content == CONTENT
snake_case__ : int = downloaded_path.with_suffix('.json' )
assert metadata_downloaded_path.exists()
snake_case__ : int = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('paths_type' , [str, list, dict] )
def UpperCamelCase__ ( A__ , A__ , A__ ) -> Any:
snake_case__ : Tuple = str(A__ )
if issubclass(A__ , A__ ):
snake_case__ : Dict = filename
elif issubclass(A__ , A__ ):
snake_case__ : Any = [filename]
elif issubclass(A__ , A__ ):
snake_case__ : Dict = {'train': filename}
snake_case__ : Union[str, Any] = 'dummy'
snake_case__ : List[Any] = xz_file.parent
snake_case__ : Dict = 'extracted'
snake_case__ : List[Any] = DownloadConfig(
cache_dir=A__ , use_etag=A__ , )
snake_case__ : Optional[int] = DownloadManager(dataset_name=A__ , download_config=A__ )
snake_case__ : Optional[Any] = dl_manager.extract(A__ )
snake_case__ : Union[str, Any] = paths
for extracted_paths in [extracted_paths]:
if isinstance(A__ , A__ ):
snake_case__ : str = [extracted_paths]
snake_case__ : Dict = [paths]
elif isinstance(A__ , A__ ):
assert "train" in extracted_paths.keys()
snake_case__ : Any = extracted_paths.values()
snake_case__ : Dict = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(A__ , A__ ):
assert extracted_path == dl_manager.extracted_paths[input_path]
snake_case__ : Optional[int] = Path(A__ )
snake_case__ : Any = extracted_path.parts
assert parts[-1] == hash_url_to_filename(A__ , etag=A__ )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
snake_case__ : Dict = extracted_path.read_text()
snake_case__ : Union[str, Any] = text_file.read_text()
assert extracted_file_content == expected_file_content
def UpperCamelCase__ ( A__ , A__ ) -> Union[str, Any]:
assert path.endswith('.jsonl' )
for num_items, line in enumerate(A__ , start=1 ):
snake_case__ : Optional[int] = json.loads(line.decode('utf-8' ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize('archive_jsonl' , ['tar_jsonl_path', 'zip_jsonl_path'] )
def UpperCamelCase__ ( A__ , A__ ) -> Optional[Any]:
snake_case__ : Tuple = request.getfixturevalue(A__ )
snake_case__ : Optional[int] = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(A__ ) , start=1 ):
_test_jsonl(A__ , A__ )
assert num_jsonl == 2
@pytest.mark.parametrize('archive_nested_jsonl' , ['tar_nested_jsonl_path', 'zip_nested_jsonl_path'] )
def UpperCamelCase__ ( A__ , A__ ) -> int:
snake_case__ : List[Any] = request.getfixturevalue(A__ )
snake_case__ : str = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(A__ ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(A__ ) , start=1 ):
_test_jsonl(A__ , A__ )
assert num_tar == 1
assert num_jsonl == 2
def UpperCamelCase__ ( A__ ) -> Union[str, Any]:
snake_case__ : Dict = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(A__ ) , start=1 ):
assert os.path.basename(A__ ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 143 | 0 |
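A minimal sketch of the request-mocking trick these download tests rely on: swap the requests entry point for a stub so no network traffic happens. The swap is done by hand here; pytest's monkeypatch.setattr does the same with automatic restore.

import requests


class MockResponse:
    status_code = 200
    headers = {"Content-Length": "3"}

    def iter_content(self, chunk_size):
        yield b"foo"


original_get = requests.get
requests.get = lambda *args, **kwargs: MockResponse()  # stub out the network
try:
    response = requests.get("http://www.mocksite.com/file1.txt")
    print(response.status_code, list(response.iter_content(1000)))
finally:
    requests.get = original_get  # restore, as monkeypatch would on teardown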
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def __lowerCAmelCase ( a__ , a__ , a__ = None ) -> str:
if version.parse(hfh.__version__ ).release < version.parse('''0.11.0''' ).release:
# old versions of hfh don't url-encode the file path
__a = quote(a__ )
return hfh.hf_hub_url(a__ , a__ , repo_type='''dataset''' , revision=a__ ) | 33 |
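A quick check of why the fallback above URL-encodes the path; quote keeps "/" safe by default, so only characters like spaces are escaped.

from urllib.parse import quote

print(quote("data/train file.json"))  # data/train%20file.json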
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def __lowerCAmelCase ( img , pts_src , pts_dst , rows , cols ) -> np.ndarray:
    __a = cva.getAffineTransform(pts_src , pts_dst )
    # warpAffine's dsize argument is (width, height), i.e. (cols, rows)
    return cva.warpAffine(img , __a , (cols, rows) )
if __name__ == "__main__":
# read original image
A : List[Any] = cva.imread(
str(Path(__file__).resolve().parent.parent / 'image_data' / 'lena.jpg')
)
# turn image in gray scale value
A : Any = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
# get image shape
A , A : List[Any] = gray_img.shape
# set different points to rotate image
A : str = np.array([[5_0, 5_0], [2_0_0, 5_0], [5_0, 2_0_0]], np.floataa)
A : Union[str, Any] = np.array([[1_0, 1_0_0], [2_0_0, 5_0], [1_0_0, 2_5_0]], np.floataa)
A : Tuple = np.array([[5_0, 5_0], [1_5_0, 5_0], [1_2_0, 2_0_0]], np.floataa)
A : Tuple = np.array([[1_0, 1_0_0], [8_0, 5_0], [1_8_0, 2_5_0]], np.floataa)
# add all rotated images in a list
A : Tuple = [
gray_img,
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
]
# plot different image rotations
A : Union[str, Any] = plt.figure(1)
A : str = ['Original', 'Rotation 1', 'Rotation 2', 'Rotation 3']
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, 'gray')
plt.title(titles[i])
plt.axis('off')
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show() | 33 | 1 |
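What getAffineTransform solves under the hood: the 2x3 matrix M that maps each source point [x, y, 1] to its destination. Pure numpy, so this check runs without OpenCV.

import numpy as np

src = np.array([[50, 50], [200, 50], [50, 200]], dtype=np.float64)
dst = np.array([[10, 100], [200, 50], [100, 250]], dtype=np.float64)

# Stack [x, y, 1] rows and solve A @ M.T = dst for the 2x3 matrix M.
A = np.hstack([src, np.ones((3, 1))])
M = np.linalg.solve(A, dst).T
print(M)
print(A @ M.T)  # reproduces dst up to floating-point error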
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
lowercase_ = logging.getLogger(__name__)
torch.set_grad_enabled(False)
lowercase_ = """cuda""" if torch.cuda.is_available() else """cpu"""
def a__ ( snake_case , snake_case=100 , snake_case=" " ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = text.split(snake_case )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(snake_case ) , snake_case )]
def a__ ( snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = [], []
for title, text in zip(documents['''title'''] , documents['''text'''] ):
if text is not None:
for passage in split_text(snake_case ):
titles.append(title if title is not None else '''''' )
texts.append(snake_case )
return {"title": titles, "text": texts}
def a__ ( snake_case , snake_case , snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = ctx_tokenizer(
documents['''title'''] , documents['''text'''] , truncation=snake_case , padding='''longest''' , return_tensors='''pt''' )['''input_ids''']
__SCREAMING_SNAKE_CASE : List[Any] = ctx_encoder(input_ids.to(device=snake_case ) , return_dict=snake_case ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def a__ ( snake_case , snake_case , snake_case , ):
"""simple docstring"""
######################################
logger.info('''Step 1 - Create the dataset''' )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
__SCREAMING_SNAKE_CASE : List[str] = load_dataset(
'''csv''' , data_files=[rag_example_args.csv_path] , split='''train''' , delimiter='''\t''' , column_names=['''title''', '''text'''] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
__SCREAMING_SNAKE_CASE : Optional[Any] = dataset.map(snake_case , batched=snake_case , num_proc=processing_args.num_proc )
# And compute the embeddings
__SCREAMING_SNAKE_CASE : Union[str, Any] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=snake_case )
__SCREAMING_SNAKE_CASE : List[str] = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
__SCREAMING_SNAKE_CASE : List[Any] = Features(
{'''text''': Value('''string''' ), '''title''': Value('''string''' ), '''embeddings''': Sequence(Value('''float32''' ) )} ) # optional, save as float32 instead of float64 to save space
__SCREAMING_SNAKE_CASE : Optional[int] = dataset.map(
partial(snake_case , ctx_encoder=snake_case , ctx_tokenizer=snake_case ) , batched=snake_case , batch_size=processing_args.batch_size , features=snake_case , )
# And finally save your dataset
__SCREAMING_SNAKE_CASE : str = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset''' )
dataset.save_to_disk(snake_case )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('''Step 2 - Index the dataset''' )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
__SCREAMING_SNAKE_CASE : Union[str, Any] = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index('''embeddings''' , custom_index=snake_case )
# And save the index
__SCREAMING_SNAKE_CASE : Tuple = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset_hnsw_index.faiss''' )
dataset.get_index('''embeddings''' ).save(snake_case )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class __UpperCamelCase :
"""simple docstring"""
lowerCAmelCase_ = field(
default=str(Path(lowerCAmelCase__ ).parent / '''test_run''' / '''dummy-kb''' / '''my_knowledge_dataset.csv''' ) , metadata={'''help''': '''Path to a tab-separated csv file with columns \'title\' and \'text\''''} , )
lowerCAmelCase_ = field(
default=lowerCAmelCase__ , metadata={'''help''': '''Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'''} , )
lowerCAmelCase_ = field(
default='''facebook/rag-sequence-nq''' , metadata={'''help''': '''The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''''} , )
lowerCAmelCase_ = field(
default='''facebook/dpr-ctx_encoder-multiset-base''' , metadata={
'''help''': (
'''The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or'''
''' \'facebook/dpr-ctx_encoder-multiset-base\''''
)
} , )
lowerCAmelCase_ = field(
default=str(Path(lowerCAmelCase__ ).parent / '''test_run''' / '''dummy-kb''' ) , metadata={'''help''': '''Path to a directory where the dataset passages and the index will be saved'''} , )
@dataclass
class __UpperCamelCase :
"""simple docstring"""
lowerCAmelCase_ = field(
default=lowerCAmelCase__ , metadata={
'''help''': '''The number of processes to use to split the documents into passages. Default is single process.'''
} , )
lowerCAmelCase_ = field(
default=16 , metadata={
'''help''': '''The batch size to use when computing the passages embeddings using the DPR context encoder.'''
} , )
@dataclass
class __UpperCamelCase :
"""simple docstring"""
lowerCAmelCase_ = field(
default=7_68 , metadata={'''help''': '''The dimension of the embeddings to pass to the HNSW Faiss index.'''} , )
lowerCAmelCase_ = field(
default=1_28 , metadata={
'''help''': (
'''The number of bi-directional links created for every new element during the HNSW index construction.'''
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
lowercase_ = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
lowercase_ , lowercase_ , lowercase_ = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
lowercase_ = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 303 | """simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class UpperCamelCase :
def __init__( self, lowerCAmelCase__) -> Optional[int]:
snake_case_ = data
snake_case_ = None
class UpperCamelCase :
def __init__( self) -> Dict:
snake_case_ = None
snake_case_ = None
def __iter__( self) -> Iterator[Any]:
snake_case_ = self.head
while self.head:
yield node.data
snake_case_ = node.next
if node == self.head:
break
def __len__( self) -> int:
return sum(1 for _ in self)
def __repr__( self) -> str:
return "->".join(str(lowerCAmelCase__) for item in iter(self))
def a_ ( self, lowerCAmelCase__) -> None:
self.insert_nth(len(self), lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__) -> None:
self.insert_nth(0, lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__) -> None:
if index < 0 or index > len(self):
raise IndexError('list index out of range.')
snake_case_ = Node(lowerCAmelCase__)
if self.head is None:
            snake_case_ = new_node  # first node points to itself
snake_case_ = snake_case_ = new_node
elif index == 0: # insert at head
snake_case_ = self.head
snake_case_ = snake_case_ = new_node
else:
snake_case_ = self.head
for _ in range(index - 1):
snake_case_ = temp.next
snake_case_ = temp.next
snake_case_ = new_node
if index == len(self) - 1: # insert at tail
snake_case_ = new_node
def a_ ( self) -> str:
return self.delete_nth(0)
def a_ ( self) -> Any:
return self.delete_nth(len(self) - 1)
def a_ ( self, lowerCAmelCase__ = 0) -> Any:
if not 0 <= index < len(self):
raise IndexError('list index out of range.')
snake_case_ = self.head
if self.head == self.tail: # just one node
snake_case_ = snake_case_ = None
elif index == 0: # delete head node
snake_case_ = self.tail.next.next
snake_case_ = self.head.next
else:
snake_case_ = self.head
for _ in range(index - 1):
snake_case_ = temp.next
snake_case_ = temp.next
snake_case_ = temp.next.next
if index == len(self) - 1: # delete at tail
snake_case_ = temp
return delete_node.data
def a_ ( self) -> bool:
return len(self) == 0
def UpperCAmelCase ( ) -> None:
snake_case_ = CircularLinkedList()
assert len(UpperCAmelCase ) == 0
assert circular_linked_list.is_empty() is True
assert str(UpperCAmelCase ) == ""
try:
circular_linked_list.delete_front()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_tail()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_nth(-1 )
raise AssertionError
except IndexError:
assert True
try:
circular_linked_list.delete_nth(0 )
raise AssertionError
except IndexError:
assert True
assert circular_linked_list.is_empty() is True
for i in range(5 ):
assert len(UpperCAmelCase ) == i
circular_linked_list.insert_nth(UpperCAmelCase , i + 1 )
assert str(UpperCAmelCase ) == "->".join(str(UpperCAmelCase ) for i in range(1 , 6 ) )
circular_linked_list.insert_tail(6 )
assert str(UpperCAmelCase ) == "->".join(str(UpperCAmelCase ) for i in range(1 , 7 ) )
circular_linked_list.insert_head(0 )
assert str(UpperCAmelCase ) == "->".join(str(UpperCAmelCase ) for i in range(0 , 7 ) )
assert circular_linked_list.delete_front() == 0
assert circular_linked_list.delete_tail() == 6
assert str(UpperCAmelCase ) == "->".join(str(UpperCAmelCase ) for i in range(1 , 6 ) )
assert circular_linked_list.delete_nth(2 ) == 3
circular_linked_list.insert_nth(2 , 3 )
assert str(UpperCAmelCase ) == "->".join(str(UpperCAmelCase ) for i in range(1 , 6 ) )
assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 69 | 0 |
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''facebook/encodec_24khz''': '''https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json''',
'''facebook/encodec_48khz''': '''https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json''',
}
class __magic_name__ (__lowercase ):
lowerCamelCase__ = '''encodec'''
def __init__( self , _a=[1.5, 3.0, 6.0, 1_2.0, 2_4.0] , _a=24000 , _a=1 , _a=False , _a=None , _a=None , _a=128 , _a=32 , _a=1 , _a=[8, 5, 4, 2] , _a="weight_norm" , _a=7 , _a=7 , _a=3 , _a=2 , _a=True , _a="reflect" , _a=2 , _a=2 , _a=1.0 , _a=1024 , _a=None , _a=True , **_a , ) -> Tuple:
lowerCAmelCase_ = target_bandwidths
lowerCAmelCase_ = sampling_rate
lowerCAmelCase_ = audio_channels
lowerCAmelCase_ = normalize
lowerCAmelCase_ = chunk_length_s
lowerCAmelCase_ = overlap
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = num_filters
lowerCAmelCase_ = num_residual_layers
lowerCAmelCase_ = upsampling_ratios
lowerCAmelCase_ = norm_type
lowerCAmelCase_ = kernel_size
lowerCAmelCase_ = last_kernel_size
lowerCAmelCase_ = residual_kernel_size
lowerCAmelCase_ = dilation_growth_rate
lowerCAmelCase_ = use_causal_conv
lowerCAmelCase_ = pad_mode
lowerCAmelCase_ = compress
lowerCAmelCase_ = num_lstm_layers
lowerCAmelCase_ = trim_right_ratio
lowerCAmelCase_ = codebook_size
lowerCAmelCase_ = codebook_dim if codebook_dim is not None else hidden_size
lowerCAmelCase_ = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
f"self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}" )
super().__init__(**_a )
@property
def __a ( self ) -> Optional[int]:
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def __a ( self ) -> Optional[int]:
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def __a ( self ) -> int:
lowerCAmelCase_ = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def __a ( self ) -> int:
return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
| 22 |
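The derived quantities above, replayed with plain arithmetic for the 24 kHz defaults; this mirrors the config's properties, not the model itself.

import math

sampling_rate = 24_000
upsampling_ratios = [8, 5, 4, 2]
target_bandwidths = [1.5, 3.0, 6.0, 12.0, 24.0]

hop_length = math.prod(upsampling_ratios)           # 320 samples per frame
frame_rate = math.ceil(sampling_rate / hop_length)  # 75 frames per second
num_quantizers = int(1000 * target_bandwidths[-1] // (frame_rate * 10))
print(hop_length, frame_rate, num_quantizers)       # 320 75 32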
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
def A(__a: Dict ):
lowerCAmelCase_ = r"\w+[.]\d+"
lowerCAmelCase_ = re.findall(__a , __a )
for pat in pats:
lowerCAmelCase_ = key.replace(__a , "_".join(pat.split("." ) ) )
return key
def A(__a: str , __a: Tuple , __a: List[Any] ):
lowerCAmelCase_ = pt_tuple_key[:-1] + ("scale",)
if (
any("norm" in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
lowerCAmelCase_ = pt_tuple_key[:-1] + ("scale",)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
lowerCAmelCase_ = pt_tuple_key[:-1] + ("scale",)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
lowerCAmelCase_ = pt_tuple_key[:-1] + ("embedding",)
return renamed_pt_tuple_key, pt_tensor
# conv layer
lowerCAmelCase_ = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
lowerCAmelCase_ = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
lowerCAmelCase_ = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight":
lowerCAmelCase_ = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
lowerCAmelCase_ = pt_tuple_key[:-1] + ("weight",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
lowerCAmelCase_ = pt_tuple_key[:-1] + ("bias",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def A(__a: Dict , __a: Any , __a: List[Any]=42 ):
# Step 1: Convert pytorch tensor to numpy
lowerCAmelCase_ = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
lowerCAmelCase_ = flax_model.init_weights(PRNGKey(__a ) )
lowerCAmelCase_ = flatten_dict(__a )
lowerCAmelCase_ = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowerCAmelCase_ = rename_key(__a )
lowerCAmelCase_ = tuple(renamed_pt_key.split("." ) )
# Correctly rename weight parameters
lowerCAmelCase_ , lowerCAmelCase_ = rename_key_and_reshape_tensor(__a , __a , __a )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." )
# also add unexpected weight so that warning is thrown
lowerCAmelCase_ = jnp.asarray(__a )
return unflatten_dict(__a )
| 22 | 1 |
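A standalone look at the rename_key regex above: \w+[.]\d+ matches indexed PyTorch module names so their dots become underscores, leaving plain attribute dots alone.

import re


def rename_key(key):
    for pat in re.findall(r"\w+[.]\d+", key):
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


print(rename_key("down_blocks.0.attentions.1.proj.weight"))
# down_blocks_0.attentions_1.proj.weight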
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
def get_masked_lm_array(lowerCamelCase ):
lowerCamelCase : Optional[int] = F'''masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
lowerCamelCase : Union[str, Any] = tf.train.load_variable(lowerCamelCase, lowerCamelCase )
if "kernel" in name:
lowerCamelCase : int = array.transpose()
return torch.from_numpy(lowerCamelCase )
def get_encoder_array(lowerCamelCase ):
lowerCamelCase : Optional[int] = F'''encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
lowerCamelCase : Optional[int] = tf.train.load_variable(lowerCamelCase, lowerCamelCase )
if "kernel" in name:
lowerCamelCase : List[str] = array.transpose()
return torch.from_numpy(lowerCamelCase )
def get_encoder_layer_array(lowerCamelCase, lowerCamelCase ):
lowerCamelCase : Union[str, Any] = F'''encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
lowerCamelCase : Tuple = tf.train.load_variable(lowerCamelCase, lowerCamelCase )
if "kernel" in name:
lowerCamelCase : Dict = array.transpose()
return torch.from_numpy(lowerCamelCase )
def get_encoder_attention_layer_array(lowerCamelCase, lowerCamelCase, lowerCamelCase ):
lowerCamelCase : Optional[int] = F'''encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
lowerCamelCase : str = tf.train.load_variable(lowerCamelCase, lowerCamelCase )
lowerCamelCase : List[str] = array.reshape(lowerCamelCase )
if "kernel" in name:
lowerCamelCase : Optional[Any] = array.transpose()
return torch.from_numpy(lowerCamelCase )
print(F'''Loading model based on config from {config_path}...''' )
lowerCamelCase : Tuple = BertConfig.from_json_file(lowerCamelCase )
lowerCamelCase : Optional[Any] = BertForMaskedLM(lowerCamelCase )
# Layers
for layer_index in range(0, config.num_hidden_layers ):
lowerCamelCase : BertLayer = model.bert.encoder.layer[layer_index]
# Self-attention
lowerCamelCase : BertSelfAttention = layer.attention.self
lowerCamelCase : Dict = get_encoder_attention_layer_array(
lowerCamelCase, """_query_dense/kernel""", self_attn.query.weight.data.shape )
lowerCamelCase : Any = get_encoder_attention_layer_array(
lowerCamelCase, """_query_dense/bias""", self_attn.query.bias.data.shape )
lowerCamelCase : Tuple = get_encoder_attention_layer_array(
lowerCamelCase, """_key_dense/kernel""", self_attn.key.weight.data.shape )
lowerCamelCase : List[str] = get_encoder_attention_layer_array(
lowerCamelCase, """_key_dense/bias""", self_attn.key.bias.data.shape )
lowerCamelCase : Tuple = get_encoder_attention_layer_array(
lowerCamelCase, """_value_dense/kernel""", self_attn.value.weight.data.shape )
lowerCamelCase : Optional[int] = get_encoder_attention_layer_array(
lowerCamelCase, """_value_dense/bias""", self_attn.value.bias.data.shape )
# Self-attention Output
lowerCamelCase : BertSelfOutput = layer.attention.output
lowerCamelCase : List[Any] = get_encoder_attention_layer_array(
lowerCamelCase, """_output_dense/kernel""", self_output.dense.weight.data.shape )
lowerCamelCase : int = get_encoder_attention_layer_array(
lowerCamelCase, """_output_dense/bias""", self_output.dense.bias.data.shape )
lowerCamelCase : Dict = get_encoder_layer_array(lowerCamelCase, """_attention_layer_norm/gamma""" )
lowerCamelCase : int = get_encoder_layer_array(lowerCamelCase, """_attention_layer_norm/beta""" )
# Intermediate
lowerCamelCase : BertIntermediate = layer.intermediate
lowerCamelCase : Optional[int] = get_encoder_layer_array(lowerCamelCase, """_intermediate_dense/kernel""" )
lowerCamelCase : Union[str, Any] = get_encoder_layer_array(lowerCamelCase, """_intermediate_dense/bias""" )
# Output
lowerCamelCase : BertOutput = layer.output
lowerCamelCase : Tuple = get_encoder_layer_array(lowerCamelCase, """_output_dense/kernel""" )
lowerCamelCase : List[str] = get_encoder_layer_array(lowerCamelCase, """_output_dense/bias""" )
lowerCamelCase : Dict = get_encoder_layer_array(lowerCamelCase, """_output_layer_norm/gamma""" )
lowerCamelCase : List[str] = get_encoder_layer_array(lowerCamelCase, """_output_layer_norm/beta""" )
# Embeddings
lowerCamelCase : Any = get_encoder_array("""_position_embedding_layer/embeddings""" )
lowerCamelCase : Dict = get_encoder_array("""_type_embedding_layer/embeddings""" )
lowerCamelCase : Union[str, Any] = get_encoder_array("""_embedding_norm_layer/gamma""" )
lowerCamelCase : Union[str, Any] = get_encoder_array("""_embedding_norm_layer/beta""" )
# LM Head
lowerCamelCase : Any = model.cls.predictions.transform
lowerCamelCase : Optional[Any] = get_masked_lm_array("""dense/kernel""" )
lowerCamelCase : Optional[int] = get_masked_lm_array("""dense/bias""" )
lowerCamelCase : Dict = get_masked_lm_array("""layer_norm/gamma""" )
lowerCamelCase : Optional[int] = get_masked_lm_array("""layer_norm/beta""" )
lowerCamelCase : List[str] = get_masked_lm_array("""embedding_table""" )
# Pooling
lowerCamelCase : int = BertPooler(config=lowerCamelCase )
lowerCamelCase : BertPooler = get_encoder_array("""_pooler_layer/kernel""" )
lowerCamelCase : BertPooler = get_encoder_array("""_pooler_layer/bias""" )
# Export final model
model.save_pretrained(lowerCamelCase )
# Integration test - should load without any errors ;)
lowerCamelCase : List[Any] = BertForMaskedLM.from_pretrained(lowerCamelCase )
print(new_model.eval() )
print("""Model conversion was done sucessfully!""" )
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
parser.add_argument(
"""--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow Token Dropping checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
type=str,
required=True,
help="""The config json file corresponding to the BERT model. This specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""",
type=str,
required=True,
help="""Path to the output PyTorch model.""",
)
_lowerCamelCase =parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
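# A hypothetical invocation of this script (the file name and paths are
# placeholders, not taken from the original source):
#   python convert_token_dropping_bert.py \
#       --tf_checkpoint_path ./token_dropping_ckpt \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./converted_model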
| 287 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
_lowerCamelCase ="""pytorch_model.bin"""
_lowerCamelCase ="""pytorch_model.bin.index.json"""
_lowerCamelCase ="""adapter_config.json"""
_lowerCamelCase ="""adapter_model.bin"""
_lowerCamelCase ="""adapter_model.safetensors"""
_lowerCamelCase ="""tf_model.h5"""
_lowerCamelCase ="""tf_model.h5.index.json"""
_lowerCamelCase ="""model.ckpt"""
_lowerCamelCase ="""flax_model.msgpack"""
_lowerCamelCase ="""flax_model.msgpack.index.json"""
_lowerCamelCase ="""model.safetensors"""
_lowerCamelCase ="""model.safetensors.index.json"""
_lowerCamelCase ="""config.json"""
_lowerCamelCase ="""preprocessor_config.json"""
_lowerCamelCase =FEATURE_EXTRACTOR_NAME
_lowerCamelCase ="""generation_config.json"""
_lowerCamelCase ="""modelcard.json"""
_lowerCamelCase ="""▁"""
_lowerCamelCase =SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
_lowerCamelCase =[
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
_lowerCamelCase =[[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
_lowerCamelCase =[[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def _a ( min_version ):
    if version.parse(__version__ ) < version.parse(min_version ):
        if "dev" in min_version:
            error_message = (
                """This example requires a source install from HuggingFace Transformers (see """
                """`https://huggingface.co/docs/transformers/installation#install-from-source`),"""
            )
        else:
            error_message = F'''This example requires a minimum version of {min_version},'''
error_message += F''' but the version found is {__version__}.\n'''
raise ImportError(
error_message
+ """Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other """
"""versions of HuggingFace Transformers.""" )
| 287 | 1 |
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class UpperCAmelCase_ ( BertTokenizerFast ):
'''simple docstring'''
UpperCamelCase__ : str = CustomTokenizer
pass
| 118 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
 _import_structure['''modeling_timesformer'''] = [
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
 sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
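# With the assignment above, every attribute access on this module goes through
# _LazyModule.__getattr__, so the heavy torch-backed modeling file is only
# imported the first time one of its symbols is actually requested.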
| 118 | 1 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class __UpperCamelCase :
    def __init__( self , a=2 , b=3 , length=64 , seed=None ) -> Any:
        rng = np.random.default_rng(seed )
        self.length = length
        self.x = rng.normal(size=(length,) ).astype(np.float32 )
        self.y = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.float32 )
def __len__( self ) -> Any:
return self.length
def __getitem__( self , lowerCAmelCase__ ) -> Optional[int]:
return {"x": self.x[i], "y": self.y[i]}
class __UpperCamelCase ( torch.nn.Module ):
    def __init__( self , a=0 , b=0 , first_batch=False ) -> Any:
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
        self.b = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
        self.first_batch = True
    def __a ( self , x=None ) -> int:
        if self.first_batch:
            print(f"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
            self.first_batch = False
        return x * self.a[0] + self.b[0]
class __UpperCamelCase ( torch.nn.Module ):
    def __init__( self , a=0 , b=0 , first_batch=False ) -> Union[str, Any]:
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a ).float() )
        self.b = torch.nn.Parameter(torch.tensor(b ).float() )
        self.first_batch = True
    def __a ( self , x=None ) -> List[str]:
        if self.first_batch:
            print(f"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
            self.first_batch = False
        return x * self.a + self.b
def _SCREAMING_SNAKE_CASE ( accelerator , batch_size = 16 ) ->List[str]:
    '''simple docstring'''
    from datasets import load_dataset
    from transformers import AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased" )
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv" , data_files=data_files )
    label_list = datasets["train"].unique("label" )
    label_to_id = {v: i for i, v in enumerate(label_list )}
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"] , examples["sentence2"] , truncation=True , max_length=None , padding="max_length" )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=["sentence1", "sentence2", "label"] , )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding="max_length" , max_length=128 , return_tensors="pt" )
        return tokenizer.pad(examples , padding="longest" , return_tensors="pt" )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"] , shuffle=True , collate_fn=collate_fn , batch_size=2 )
    eval_dataloader = DataLoader(tokenized_datasets["validation"] , shuffle=False , collate_fn=collate_fn , batch_size=1 )
return train_dataloader, eval_dataloader
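# A minimal usage sketch; the first argument is the Accelerator instance that
# collate_fn above closes over (the batch-size argument is illustrative):
#   from accelerate import Accelerator
#   train_dl, eval_dl = _SCREAMING_SNAKE_CASE(Accelerator(), 16)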
| 105 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
lowercase_ = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
lowercase_ = {'facebook/blenderbot-3B': 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def lowerCAmelCase ( ):
"""simple docstring"""
__A = (
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
__A = bs[:]
__A = 0
for b in range(2**8 ):
if b not in bs:
bs.append(__UpperCamelCase )
cs.append(2**8 + n )
n += 1
__A = [chr(__UpperCamelCase ) for n in cs]
return dict(zip(__UpperCamelCase , __UpperCamelCase ) )
def lowerCAmelCase ( __UpperCamelCase ):
"""simple docstring"""
__A = set()
__A = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__A = char
return pairs
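# A quick worked example for get_pairs: the symbol tuple
# ("h", "e", "l", "l", "o") yields the adjacent bigrams
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}, which the BPE loop below
# repeatedly scans for the highest-ranked merge.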
class snake_case ( _lowerCAmelCase ):
'''simple docstring'''
A_ : Tuple = VOCAB_FILES_NAMES
A_ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
A_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ : Optional[Any] = ["input_ids", "attention_mask"]
def __init__( self : Dict, _lowerCamelCase : Optional[Any], _lowerCamelCase : List[str], _lowerCamelCase : Dict="replace", _lowerCamelCase : Any="<s>", _lowerCamelCase : Optional[int]="</s>", _lowerCamelCase : Dict="</s>", _lowerCamelCase : List[Any]="<s>", _lowerCamelCase : List[str]="<unk>", _lowerCamelCase : str="<pad>", _lowerCamelCase : Any="<mask>", _lowerCamelCase : Any=False, **_lowerCamelCase : Tuple, ):
'''simple docstring'''
__A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else bos_token
__A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else eos_token
__A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else sep_token
__A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else cls_token
__A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else unk_token
__A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else mask_token
super().__init__(
errors=_lowerCamelCase, bos_token=_lowerCamelCase, eos_token=_lowerCamelCase, unk_token=_lowerCamelCase, sep_token=_lowerCamelCase, cls_token=_lowerCamelCase, pad_token=_lowerCamelCase, mask_token=_lowerCamelCase, add_prefix_space=_lowerCamelCase, **_lowerCamelCase, )
with open(_lowerCamelCase, encoding='''utf-8''' ) as vocab_handle:
__A = json.load(_lowerCamelCase )
__A = {v: k for k, v in self.encoder.items()}
__A = errors # how to handle errors in decoding
__A = bytes_to_unicode()
__A = {v: k for k, v in self.byte_encoder.items()}
with open(_lowerCamelCase, encoding='''utf-8''' ) as merges_handle:
__A = merges_handle.read().split('''\n''' )[1:-1]
__A = [tuple(merge.split() ) for merge in bpe_merges]
__A = dict(zip(_lowerCamelCase, range(len(_lowerCamelCase ) ) ) )
__A = {}
__A = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__A = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def _SCREAMING_SNAKE_CASE ( self : Any ):
'''simple docstring'''
return len(self.encoder )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
'''simple docstring'''
return dict(self.encoder, **self.added_tokens_encoder )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : List[Any] ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
__A = tuple(_lowerCamelCase )
__A = get_pairs(_lowerCamelCase )
if not pairs:
return token
while True:
__A = min(_lowerCamelCase, key=lambda _lowerCamelCase : self.bpe_ranks.get(_lowerCamelCase, float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
__A , __A = bigram
__A = []
__A = 0
while i < len(_lowerCamelCase ):
try:
__A = word.index(_lowerCamelCase, _lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__A = j
if word[i] == first and i < len(_lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__A = tuple(_lowerCamelCase )
__A = new_word
if len(_lowerCamelCase ) == 1:
break
else:
__A = get_pairs(_lowerCamelCase )
__A = ''' '''.join(_lowerCamelCase )
__A = word
return word
def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : Dict ):
'''simple docstring'''
__A = []
for token in re.findall(self.pat, _lowerCamelCase ):
__A = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_lowerCamelCase ).split(''' ''' ) )
return bpe_tokens
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : Dict ):
'''simple docstring'''
return self.encoder.get(_lowerCamelCase, self.encoder.get(self.unk_token ) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : Any ):
'''simple docstring'''
return self.decoder.get(_lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : Dict ):
'''simple docstring'''
__A = ''''''.join(_lowerCamelCase )
__A = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''', errors=self.errors )
return text
def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : str, _lowerCamelCase : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(_lowerCamelCase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__A = os.path.join(
_lowerCamelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
__A = os.path.join(
_lowerCamelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(_lowerCamelCase, '''w''', encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder, indent=2, sort_keys=_lowerCamelCase, ensure_ascii=_lowerCamelCase ) + '''\n''' )
__A = 0
with open(_lowerCamelCase, '''w''', encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda _lowerCamelCase : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
''' Please check that the tokenizer is not corrupted!''' )
__A = token_index
writer.write(''' '''.join(_lowerCamelCase ) + '''\n''' )
index += 1
return vocab_file, merge_file
def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : List[int], _lowerCamelCase : Optional[List[int]] = None, _lowerCamelCase : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCamelCase, token_ids_a=_lowerCamelCase, already_has_special_tokens=_lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(_lowerCamelCase )) + [1]
return [1] + ([0] * len(_lowerCamelCase )) + [1, 1] + ([0] * len(_lowerCamelCase )) + [1]
def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : List[int], _lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
__A = [self.sep_token_id]
__A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : Union[str, Any], _lowerCamelCase : List[str]=False, **_lowerCamelCase : List[Any] ):
'''simple docstring'''
__A = kwargs.pop('''add_prefix_space''', self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_lowerCamelCase ) > 0 and not text[0].isspace()):
__A = ''' ''' + text
return (text, kwargs)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : List[int], _lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
return token_ids_a + [self.eos_token_id]
def _SCREAMING_SNAKE_CASE ( self : List[Any], _lowerCamelCase : "Conversation" ):
'''simple docstring'''
__A = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(''' ''' + text )
else:
# Generated responses should contain them already.
inputs.append(_lowerCamelCase )
__A = ''' '''.join(_lowerCamelCase )
__A = self.encode(_lowerCamelCase )
if len(_lowerCamelCase ) > self.model_max_length:
__A = input_ids[-self.model_max_length :]
logger.warning(f'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' )
return input_ids
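# A minimal usage sketch for the tokenizer class above (declared as
# `snake_case` here), using the checkpoint named in the pretrained maps:
#   tok = snake_case.from_pretrained("facebook/blenderbot-3B")
#   ids = tok("Hello world")["input_ids"]  # ends with eos_token_id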
| 266 | 0 |
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
_lowerCamelCase : List[str] = logging.get_logger(__name__)
class __snake_case (_a ):
def SCREAMING_SNAKE_CASE ( self : Dict , _UpperCAmelCase : Optional[int] ) -> Optional[int]:
'''simple docstring'''
 if isinstance(_UpperCAmelCase , str ):
_lowerCAmelCase : Optional[int] = [label.strip() for label in labels.split(""",""" ) if label.strip()]
return labels
def __call__( self : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Dict ) -> List[Any]:
'''simple docstring'''
if len(_UpperCAmelCase ) == 0 or len(_UpperCAmelCase ) == 0:
raise ValueError("""You must include at least one label and at least one sequence.""" )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
"""The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. """
"""Make sure the passed template includes formatting syntax such as {{}} where the label should go."""
).format(_UpperCAmelCase ) )
 if isinstance(_UpperCAmelCase , str ):
_lowerCAmelCase : int = [sequences]
_lowerCAmelCase : Optional[int] = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(_UpperCAmelCase )] for label in labels] )
return sequence_pairs, sequences
@add_end_docstrings(_a )
class __snake_case (_a ):
def __init__( self : List[str] , _UpperCAmelCase : Optional[int]=ZeroShotClassificationArgumentHandler() , *_UpperCAmelCase : Tuple , **_UpperCAmelCase : Tuple ) -> Dict:
'''simple docstring'''
_lowerCAmelCase : Any = args_parser
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
if self.entailment_id == -1:
logger.warning(
"""Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to """
"""-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.""" )
@property
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
'''simple docstring'''
for label, ind in self.model.config.labelaid.items():
if label.lower().startswith("""entail""" ):
return ind
return -1
def SCREAMING_SNAKE_CASE ( self : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : Dict=TruncationStrategy.ONLY_FIRST , **_UpperCAmelCase : Optional[int] ) -> Dict:
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
"""Tokenizer was not supporting padding necessary for zero-shot, attempting to use """
""" `pad_token=eos_token`""" )
_lowerCAmelCase : Any = self.tokenizer.eos_token
try:
_lowerCAmelCase : str = self.tokenizer(
_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_tensors=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , )
except Exception as e:
if "too short" in str(_UpperCAmelCase ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
_lowerCAmelCase : Dict = self.tokenizer(
_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_tensors=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def SCREAMING_SNAKE_CASE ( self : Tuple , **_UpperCAmelCase : Dict ) -> Optional[int]:
'''simple docstring'''
if kwargs.get("""multi_class""" , _UpperCAmelCase ) is not None:
_lowerCAmelCase : Optional[int] = kwargs["""multi_class"""]
logger.warning(
"""The `multi_class` argument has been deprecated and renamed to `multi_label`. """
"""`multi_class` will be removed in a future version of Transformers.""" )
_lowerCAmelCase : List[str] = {}
if "candidate_labels" in kwargs:
_lowerCAmelCase : Tuple = self._args_parser._parse_labels(kwargs["""candidate_labels"""] )
if "hypothesis_template" in kwargs:
_lowerCAmelCase : Optional[Any] = kwargs["""hypothesis_template"""]
_lowerCAmelCase : Optional[Any] = {}
if "multi_label" in kwargs:
_lowerCAmelCase : Optional[Any] = kwargs["""multi_label"""]
return preprocess_params, {}, postprocess_params
def __call__( self : Union[str, Any] , _UpperCAmelCase : Union[str, List[str]] , *_UpperCAmelCase : Tuple , **_UpperCAmelCase : int , ) -> Optional[Any]:
'''simple docstring'''
if len(_UpperCAmelCase ) == 0:
pass
elif len(_UpperCAmelCase ) == 1 and "candidate_labels" not in kwargs:
_lowerCAmelCase : List[Any] = args[0]
else:
raise ValueError(f"Unable to understand extra arguments {args}" )
return super().__call__(_UpperCAmelCase , **_UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : str=None , _UpperCAmelCase : Tuple="This example is {}." ) -> Optional[int]:
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self._args_parser(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
for i, (candidate_label, sequence_pair) in enumerate(zip(_UpperCAmelCase , _UpperCAmelCase ) ):
_lowerCAmelCase : Any = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(_UpperCAmelCase ) - 1,
**model_input,
}
def SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCAmelCase : str ) -> Dict:
'''simple docstring'''
_lowerCAmelCase : Dict = inputs["""candidate_label"""]
_lowerCAmelCase : Optional[Any] = inputs["""sequence"""]
_lowerCAmelCase : Dict = {k: inputs[k] for k in self.tokenizer.model_input_names}
_lowerCAmelCase : str = self.model(**_UpperCAmelCase )
_lowerCAmelCase : Union[str, Any] = {
"""candidate_label""": candidate_label,
"""sequence""": sequence,
"""is_last""": inputs["""is_last"""],
**outputs,
}
return model_outputs
def SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : List[str]=False ) -> List[Any]:
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = [outputs["""candidate_label"""] for outputs in model_outputs]
_lowerCAmelCase : Tuple = [outputs["""sequence"""] for outputs in model_outputs]
_lowerCAmelCase : List[str] = np.concatenate([output["""logits"""].numpy() for output in model_outputs] )
_lowerCAmelCase : int = logits.shape[0]
_lowerCAmelCase : str = len(_UpperCAmelCase )
_lowerCAmelCase : Optional[int] = N // n
_lowerCAmelCase : Optional[int] = logits.reshape((num_sequences, n, -1) )
if multi_label or len(_UpperCAmelCase ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
_lowerCAmelCase : int = self.entailment_id
_lowerCAmelCase : List[str] = -1 if entailment_id == 0 else 0
_lowerCAmelCase : Union[str, Any] = reshaped_outputs[..., [contradiction_id, entailment_id]]
_lowerCAmelCase : Any = np.exp(_UpperCAmelCase ) / np.exp(_UpperCAmelCase ).sum(-1 , keepdims=_UpperCAmelCase )
_lowerCAmelCase : str = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
_lowerCAmelCase : str = reshaped_outputs[..., self.entailment_id]
_lowerCAmelCase : Any = np.exp(_UpperCAmelCase ) / np.exp(_UpperCAmelCase ).sum(-1 , keepdims=_UpperCAmelCase )
_lowerCAmelCase : Any = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
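# A minimal usage sketch via the high-level factory (the model name is a
# common NLI checkpoint, shown here for illustration):
#   from transformers import pipeline
#   clf = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
#   clf("I have a problem with my iphone", candidate_labels=["urgent", "not urgent"])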
| 159 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
_lowerCamelCase : Union[str, Any] = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class __snake_case (_a ):
lowerCAmelCase__ = "ibert"
def __init__( self : int , _UpperCAmelCase : Optional[int]=3_0522 , _UpperCAmelCase : Union[str, Any]=768 , _UpperCAmelCase : str=12 , _UpperCAmelCase : List[str]=12 , _UpperCAmelCase : Any=3072 , _UpperCAmelCase : Optional[Any]="gelu" , _UpperCAmelCase : Optional[Any]=0.1 , _UpperCAmelCase : Any=0.1 , _UpperCAmelCase : Dict=512 , _UpperCAmelCase : Any=2 , _UpperCAmelCase : Tuple=0.02 , _UpperCAmelCase : str=1E-12 , _UpperCAmelCase : str=1 , _UpperCAmelCase : Any=0 , _UpperCAmelCase : Union[str, Any]=2 , _UpperCAmelCase : List[str]="absolute" , _UpperCAmelCase : Optional[int]=False , _UpperCAmelCase : Any="none" , **_UpperCAmelCase : Optional[int] , ) -> Optional[int]:
'''simple docstring'''
super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
_lowerCAmelCase : str = vocab_size
_lowerCAmelCase : Any = hidden_size
_lowerCAmelCase : str = num_hidden_layers
_lowerCAmelCase : List[Any] = num_attention_heads
_lowerCAmelCase : Optional[int] = hidden_act
_lowerCAmelCase : Optional[Any] = intermediate_size
_lowerCAmelCase : int = hidden_dropout_prob
_lowerCAmelCase : Tuple = attention_probs_dropout_prob
_lowerCAmelCase : Optional[Any] = max_position_embeddings
_lowerCAmelCase : Union[str, Any] = type_vocab_size
_lowerCAmelCase : Dict = initializer_range
_lowerCAmelCase : Any = layer_norm_eps
_lowerCAmelCase : str = position_embedding_type
_lowerCAmelCase : int = quant_mode
_lowerCAmelCase : str = force_dequant
class __snake_case (_a ):
@property
def SCREAMING_SNAKE_CASE ( self : str ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
_lowerCAmelCase : Optional[int] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_lowerCAmelCase : Optional[Any] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
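# A minimal usage sketch for the I-BERT config above (upstream it is called
# IBertConfig); quant_mode=True enables integer-only inference and
# force_dequant can exempt named ops such as "gelu" or "softmax":
#   cfg = IBertConfig(quant_mode=True, force_dequant="gelu")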
| 159 | 1 |
from __future__ import annotations
import time
A__ = list[tuple[int, int]]
A__ = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
A__ = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class __lowerCAmelCase :
def __init__( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = pos_x
_lowerCAmelCase = pos_y
_lowerCAmelCase = (pos_y, pos_x)
_lowerCAmelCase = goal_x
_lowerCAmelCase = goal_y
_lowerCAmelCase = parent
class __lowerCAmelCase :
def __init__( self , _snake_case , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = Node(start[1] , start[0] , goal[1] , goal[0] , _snake_case )
_lowerCAmelCase = Node(goal[1] , goal[0] , goal[1] , goal[0] , _snake_case )
_lowerCAmelCase = [self.start]
_lowerCAmelCase = False
def snake_case ( self ):
"""simple docstring"""
while self.node_queue:
_lowerCAmelCase = self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
_lowerCAmelCase = True
return self.retrace_path(_snake_case )
_lowerCAmelCase = self.get_successors(_snake_case )
for node in successors:
self.node_queue.append(_snake_case )
if not self.reached:
return [self.start.pos]
return None
def snake_case ( self , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = []
for action in delta:
_lowerCAmelCase = parent.pos_x + action[1]
_lowerCAmelCase = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_snake_case ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(_snake_case , _snake_case , self.target.pos_y , self.target.pos_x , _snake_case ) )
return successors
def snake_case ( self , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = node
_lowerCAmelCase = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
_lowerCAmelCase = current_node.parent
path.reverse()
return path
class __lowerCAmelCase :
def __init__( self , _snake_case , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = BreadthFirstSearch(_snake_case , _snake_case )
_lowerCAmelCase = BreadthFirstSearch(_snake_case , _snake_case )
_lowerCAmelCase = False
def snake_case ( self ):
"""simple docstring"""
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
_lowerCAmelCase = self.fwd_bfs.node_queue.pop(0 )
_lowerCAmelCase = self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
_lowerCAmelCase = True
return self.retrace_bidirectional_path(
_snake_case , _snake_case )
_lowerCAmelCase = current_bwd_node
_lowerCAmelCase = current_fwd_node
_lowerCAmelCase = {
self.fwd_bfs: self.fwd_bfs.get_successors(_snake_case ),
self.bwd_bfs: self.bwd_bfs.get_successors(_snake_case ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(_snake_case )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def snake_case ( self , _snake_case , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = self.fwd_bfs.retrace_path(_snake_case )
_lowerCAmelCase = self.bwd_bfs.retrace_path(_snake_case )
bwd_path.pop()
bwd_path.reverse()
_lowerCAmelCase = fwd_path + bwd_path
return path
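# Note: plain BFS explores on the order of b**d nodes (b = branching factor,
# d = solution depth); the bidirectional variant above searches from both ends
# and meets in the middle, roughly b**(d/2) nodes per side.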
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
A__ = (0, 0)
A__ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
A__ = time.time()
A__ = BreadthFirstSearch(init, goal)
A__ = bfs.search()
A__ = time.time() - start_bfs_time
print("""Unidirectional BFS computation time : """, bfs_time)
A__ = time.time()
A__ = BidirectionalBreadthFirstSearch(init, goal)
A__ = bd_bfs.search()
A__ = time.time() - start_bd_bfs_time
print("""Bidirectional BFS computation time : """, bd_bfs_time)
| 82 |
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class __lowerCAmelCase ( lowerCamelCase__ ):
 # to overwrite in feature-extractor-specific tests
__lowerCamelCase = None
__lowerCamelCase = None
@property
def snake_case ( self ):
"""simple docstring"""
return self.feat_extract_tester.prepare_feat_extract_dict()
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_snake_case , """feature_size""" ) )
self.assertTrue(hasattr(_snake_case , """sampling_rate""" ) )
self.assertTrue(hasattr(_snake_case , """padding_value""" ) )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common()
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
_lowerCAmelCase = feat_extract.model_input_names[0]
_lowerCAmelCase = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_snake_case ) == len(_snake_case ) for x, y in zip(_snake_case , processed_features[input_name] ) ) )
_lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_snake_case )
_lowerCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type="""np""" )
_lowerCAmelCase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
_lowerCAmelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_snake_case )
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
_lowerCAmelCase = feat_extract.model_input_names[0]
_lowerCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type="""pt""" )
_lowerCAmelCase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
_lowerCAmelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_snake_case )
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
_lowerCAmelCase = feat_extract.model_input_names[0]
_lowerCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type="""tf""" )
_lowerCAmelCase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
_lowerCAmelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def snake_case ( self , _snake_case=False ):
"""simple docstring"""
def _inputs_have_equal_length(_snake_case ):
_lowerCAmelCase = len(input[0] )
for input_slice in input[1:]:
if len(_snake_case ) != length:
return False
return True
def _inputs_are_equal(_snake_case , _snake_case ):
if len(_snake_case ) != len(_snake_case ):
return False
for input_slice_a, input_slice_a in zip(_snake_case , _snake_case ):
if not np.allclose(np.asarray(_snake_case ) , np.asarray(_snake_case ) , atol=1e-3 ):
return False
return True
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
_lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(numpify=_snake_case )
_lowerCAmelCase = feat_extract.model_input_names[0]
_lowerCAmelCase = BatchFeature({input_name: speech_inputs} )
_lowerCAmelCase = self.feat_extract_tester.seq_length_diff
_lowerCAmelCase = self.feat_extract_tester.max_seq_length + pad_diff
_lowerCAmelCase = self.feat_extract_tester.min_seq_length
_lowerCAmelCase = self.feat_extract_tester.batch_size
_lowerCAmelCase = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
_lowerCAmelCase = feat_extract.pad(_snake_case , padding=_snake_case )
_lowerCAmelCase = input_a[input_name]
_lowerCAmelCase = feat_extract.pad(_snake_case , padding="""longest""" )
_lowerCAmelCase = input_a[input_name]
_lowerCAmelCase = feat_extract.pad(_snake_case , padding="""max_length""" , max_length=len(speech_inputs[-1] ) )
_lowerCAmelCase = input_a[input_name]
_lowerCAmelCase = feat_extract.pad(_snake_case , padding="""longest""" , return_tensors="""np""" )
_lowerCAmelCase = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(_snake_case ):
feat_extract.pad(_snake_case , padding="""max_length""" )[input_name]
_lowerCAmelCase = feat_extract.pad(
_snake_case , padding="""max_length""" , max_length=_snake_case , return_tensors="""np""" )
_lowerCAmelCase = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(_snake_case ) )
self.assertTrue(_inputs_have_equal_length(_snake_case ) )
self.assertTrue(_inputs_have_equal_length(_snake_case ) )
self.assertTrue(_inputs_are_equal(_snake_case , _snake_case ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
_lowerCAmelCase = feat_extract.pad(_snake_case , pad_to_multiple_of=10 )
_lowerCAmelCase = input_a[input_name]
_lowerCAmelCase = feat_extract.pad(_snake_case , padding="""longest""" , pad_to_multiple_of=10 )
_lowerCAmelCase = input_a[input_name]
_lowerCAmelCase = feat_extract.pad(
_snake_case , padding="""max_length""" , pad_to_multiple_of=10 , max_length=_snake_case )
_lowerCAmelCase = input_a[input_name]
_lowerCAmelCase = feat_extract.pad(
_snake_case , padding="""max_length""" , pad_to_multiple_of=10 , max_length=_snake_case , return_tensors="""np""" , )
_lowerCAmelCase = input_a[input_name]
self.assertTrue(all(len(_snake_case ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(_snake_case , _snake_case ) )
_lowerCAmelCase = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(_snake_case ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
_lowerCAmelCase = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1e-3 )
def snake_case ( self , _snake_case=False ):
"""simple docstring"""
def _inputs_have_equal_length(_snake_case ):
_lowerCAmelCase = len(input[0] )
for input_slice in input[1:]:
if len(_snake_case ) != length:
return False
return True
def _inputs_are_equal(_snake_case , _snake_case ):
if len(_snake_case ) != len(_snake_case ):
return False
for input_slice_a, input_slice_a in zip(_snake_case , _snake_case ):
if not np.allclose(np.asarray(_snake_case ) , np.asarray(_snake_case ) , atol=1e-3 ):
return False
return True
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
_lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(numpify=_snake_case )
_lowerCAmelCase = feat_extract.model_input_names[0]
_lowerCAmelCase = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
_lowerCAmelCase = feat_extract.pad(
_snake_case , padding="""max_length""" , max_length=len(speech_inputs[0] ) , truncation=_snake_case )
_lowerCAmelCase = input_a[input_name]
_lowerCAmelCase = feat_extract.pad(_snake_case , padding="""max_length""" , max_length=len(speech_inputs[0] ) )
_lowerCAmelCase = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_snake_case ) )
self.assertFalse(_inputs_have_equal_length(_snake_case ) )
# truncate to smallest with np
_lowerCAmelCase = feat_extract.pad(
_snake_case , padding="""max_length""" , max_length=len(speech_inputs[0] ) , return_tensors="""np""" , truncation=_snake_case , )
_lowerCAmelCase = input_a[input_name]
_lowerCAmelCase = feat_extract.pad(
_snake_case , padding="""max_length""" , max_length=len(speech_inputs[0] ) , return_tensors="""np""" )
_lowerCAmelCase = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_snake_case ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(_snake_case ) )
# truncate to middle
_lowerCAmelCase = feat_extract.pad(
_snake_case , padding="""max_length""" , max_length=len(speech_inputs[1] ) , truncation=_snake_case , return_tensors="""np""" , )
_lowerCAmelCase = input_a[input_name]
_lowerCAmelCase = feat_extract.pad(
_snake_case , padding="""max_length""" , max_length=len(speech_inputs[1] ) , truncation=_snake_case )
_lowerCAmelCase = input_a[input_name]
_lowerCAmelCase = feat_extract.pad(
_snake_case , padding="""max_length""" , max_length=len(speech_inputs[1] ) , return_tensors="""np""" )
_lowerCAmelCase = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(_snake_case ) )
self.assertTrue(_inputs_have_equal_length(_snake_case ) )
self.assertTrue(_inputs_are_equal(_snake_case , _snake_case ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(_snake_case ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_snake_case ):
feat_extract.pad(_snake_case , truncation=_snake_case )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_snake_case ):
feat_extract.pad(_snake_case , padding="""longest""" , truncation=_snake_case )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_snake_case ):
feat_extract.pad(_snake_case , padding="""longest""" , truncation=_snake_case )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(_snake_case ):
feat_extract.pad(_snake_case , padding="""max_length""" , truncation=_snake_case )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
_lowerCAmelCase = 12
_lowerCAmelCase = feat_extract.pad(
_snake_case , padding="""max_length""" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_snake_case , truncation=_snake_case , )
_lowerCAmelCase = input_a[input_name]
_lowerCAmelCase = feat_extract.pad(
_snake_case , padding="""max_length""" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_snake_case , )
_lowerCAmelCase = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
_lowerCAmelCase = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
_lowerCAmelCase = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(_snake_case ) )
self.assertFalse(_inputs_have_equal_length(_snake_case ) )
def snake_case ( self ):
"""simple docstring"""
self._check_padding(numpify=_snake_case )
def snake_case ( self ):
"""simple docstring"""
self._check_padding(numpify=_snake_case )
def snake_case ( self ):
"""simple docstring"""
self._check_truncation(numpify=_snake_case )
def snake_case ( self ):
"""simple docstring"""
self._check_truncation(numpify=_snake_case )
@require_torch
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
_lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common()
_lowerCAmelCase = feat_extract.model_input_names[0]
_lowerCAmelCase = BatchFeature({input_name: speech_inputs} )
_lowerCAmelCase = feat_extract.pad(_snake_case , padding="""longest""" , return_tensors="""np""" )[input_name]
_lowerCAmelCase = feat_extract.pad(_snake_case , padding="""longest""" , return_tensors="""pt""" )[input_name]
 self.assertTrue(abs(input_np.astype(np.float32 ).sum() - input_pt.numpy().astype(np.float32 ).sum() ) < 1e-2 )
@require_tf
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
_lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common()
_lowerCAmelCase = feat_extract.model_input_names[0]
_lowerCAmelCase = BatchFeature({input_name: speech_inputs} )
_lowerCAmelCase = feat_extract.pad(_snake_case , padding="""longest""" , return_tensors="""np""" )[input_name]
_lowerCAmelCase = feat_extract.pad(_snake_case , padding="""longest""" , return_tensors="""tf""" )[input_name]
 self.assertTrue(abs(input_np.astype(np.float32 ).sum() - input_tf.numpy().astype(np.float32 ).sum() ) < 1e-2 )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.feat_extract_dict
_lowerCAmelCase = True
_lowerCAmelCase = self.feature_extraction_class(**_snake_case )
_lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common()
_lowerCAmelCase = [len(_snake_case ) for x in speech_inputs]
_lowerCAmelCase = feat_extract.model_input_names[0]
_lowerCAmelCase = BatchFeature({input_name: speech_inputs} )
_lowerCAmelCase = feat_extract.pad(_snake_case , padding="""longest""" , return_tensors="""np""" )
self.assertIn("""attention_mask""" , _snake_case )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _snake_case )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.feat_extract_dict
_lowerCAmelCase = True
_lowerCAmelCase = self.feature_extraction_class(**_snake_case )
_lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common()
_lowerCAmelCase = [len(_snake_case ) for x in speech_inputs]
_lowerCAmelCase = feat_extract.model_input_names[0]
_lowerCAmelCase = BatchFeature({input_name: speech_inputs} )
_lowerCAmelCase = min(_snake_case )
_lowerCAmelCase = feat_extract.pad(
_snake_case , padding="""max_length""" , max_length=_snake_case , truncation=_snake_case , return_tensors="""np""" )
self.assertIn("""attention_mask""" , _snake_case )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
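# Concrete feature-extractor test suites are expected to fill in the two class
# attributes left as None above (the feature-extraction class under test and
# its tester object) before these shared padding/truncation checks run.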
| 82 | 1 |
import sys
import turtle
def get_mid(pa , pb ):
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2
def triangle(vertexa , vertexb , vertexc , depth ):
    my_pen.up()
    my_pen.goto(vertexa[0] , vertexa[1] )
    my_pen.down()
    my_pen.goto(vertexb[0] , vertexb[1] )
    my_pen.goto(vertexc[0] , vertexc[1] )
    my_pen.goto(vertexa[0] , vertexa[1] )
    if depth == 0:
        return
    triangle(vertexa , get_mid(vertexa , vertexb ) , get_mid(vertexa , vertexc ) , depth - 1 )
    triangle(vertexb , get_mid(vertexa , vertexb ) , get_mid(vertexb , vertexc ) , depth - 1 )
    triangle(vertexc , get_mid(vertexc , vertexb ) , get_mid(vertexa , vertexc ) , depth - 1 )
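# Each call draws one triangle outline and then recurses three times, so a run
# at depth d draws (3 ** (d + 1) - 1) // 2 outlines in total.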
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
"""Correct format for using this script: """
"""python fractals.py <int:depth_for_fractal>"""
)
__UpperCAmelCase = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor("""red""")
__UpperCAmelCase = [(-175, -125), (0, 175), (175, -125)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 353 |
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
lowerCamelCase : Dict =(EulerDiscreteScheduler,)
lowerCamelCase : Dict =10
def SCREAMING_SNAKE_CASE ( self : Optional[int] , **lowerCAmelCase : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Dict = {
"""num_train_timesteps""": 11_00,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
}
config.update(**lowerCAmelCase )
return config
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
 for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
"""simple docstring"""
 for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=lowerCAmelCase , beta_end=lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : Any = self.scheduler_classes[0]
__lowerCAmelCase : int = self.get_scheduler_config()
__lowerCAmelCase : Any = scheduler_class(**lowerCAmelCase )
scheduler.set_timesteps(self.num_inference_steps )
__lowerCAmelCase : str = torch.manual_seed(0 )
__lowerCAmelCase : List[Any] = self.dummy_model()
__lowerCAmelCase : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma
__lowerCAmelCase : int = sample.to(lowerCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
__lowerCAmelCase : Tuple = scheduler.scale_model_input(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : Dict = model(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : str = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , generator=lowerCAmelCase )
__lowerCAmelCase : Optional[int] = output.prev_sample
__lowerCAmelCase : str = torch.sum(torch.abs(lowerCAmelCase ) )
__lowerCAmelCase : Optional[int] = torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_sum.item() - 10.0807 ) < 1e-2
assert abs(result_mean.item() - 0.0131 ) < 1e-3
def SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : List[Any] = self.scheduler_classes[0]
__lowerCAmelCase : List[Any] = self.get_scheduler_config(prediction_type="""v_prediction""" )
__lowerCAmelCase : List[str] = scheduler_class(**lowerCAmelCase )
scheduler.set_timesteps(self.num_inference_steps )
__lowerCAmelCase : Any = torch.manual_seed(0 )
__lowerCAmelCase : int = self.dummy_model()
__lowerCAmelCase : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
__lowerCAmelCase : List[Any] = sample.to(lowerCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
__lowerCAmelCase : Any = scheduler.scale_model_input(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : Any = model(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : Dict = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , generator=lowerCAmelCase )
__lowerCAmelCase : Dict = output.prev_sample
__lowerCAmelCase : List[str] = torch.sum(torch.abs(lowerCAmelCase ) )
__lowerCAmelCase : Optional[int] = torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_sum.item() - 0.0002 ) < 1e-2
assert abs(result_mean.item() - 2.2676e-06 ) < 1e-3
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
"""simple docstring"""
__lowerCAmelCase : List[Any] = self.scheduler_classes[0]
__lowerCAmelCase : Dict = self.get_scheduler_config()
__lowerCAmelCase : Optional[int] = scheduler_class(**lowerCAmelCase )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCAmelCase )
__lowerCAmelCase : Union[str, Any] = torch.manual_seed(0 )
__lowerCAmelCase : Dict = self.dummy_model()
__lowerCAmelCase : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
__lowerCAmelCase : Dict = sample.to(lowerCAmelCase )
for t in scheduler.timesteps:
__lowerCAmelCase : Union[str, Any] = scheduler.scale_model_input(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : List[str] = model(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : int = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , generator=lowerCAmelCase )
__lowerCAmelCase : Any = output.prev_sample
__lowerCAmelCase : int = torch.sum(torch.abs(lowerCAmelCase ) )
__lowerCAmelCase : str = torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_sum.item() - 10.0807 ) < 1e-2
assert abs(result_mean.item() - 0.0131 ) < 1e-3
def SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = self.scheduler_classes[0]
__lowerCAmelCase : Optional[int] = self.get_scheduler_config()
__lowerCAmelCase : List[Any] = scheduler_class(**lowerCAmelCase , use_karras_sigmas=lowerCAmelCase )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCAmelCase )
__lowerCAmelCase : str = torch.manual_seed(0 )
__lowerCAmelCase : str = self.dummy_model()
__lowerCAmelCase : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
__lowerCAmelCase : int = sample.to(lowerCAmelCase )
for t in scheduler.timesteps:
__lowerCAmelCase : int = scheduler.scale_model_input(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : Dict = model(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : Tuple = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , generator=lowerCAmelCase )
__lowerCAmelCase : List[Any] = output.prev_sample
__lowerCAmelCase : Tuple = torch.sum(torch.abs(lowerCAmelCase ) )
__lowerCAmelCase : Any = torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_sum.item() - 124.52_2994_9951_1719 ) < 1e-2
assert abs(result_mean.item() - 0.1_6213_9326_3339_9963 ) < 1e-3
| 139 | 0 |
'''simple docstring'''
def __snake_case( _lowerCAmelCase ) -> bool:
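    # Return True when the list is a valid arithmetic series, i.e. consecutive differences are all equal.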
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise ValueError("""Input series is not valid, valid series - [2, 4, 6]""" )
if len(_lowerCAmelCase ) == 0:
raise ValueError("""Input list must be a non empty list""" )
if len(_lowerCAmelCase ) == 1:
return True
snake_case__ : List[str] = series[1] - series[0]
for index in range(len(_lowerCAmelCase ) - 1 ):
if series[index + 1] - series[index] != common_diff:
return False
return True
def __snake_case( _lowerCAmelCase ) -> float:
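    # Return the arithmetic mean of the series: the sum of its terms divided by their count.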
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise ValueError("""Input series is not valid, valid series - [2, 4, 6]""" )
if len(_lowerCAmelCase ) == 0:
raise ValueError("""Input list must be a non empty list""" )
snake_case__ : Any = 0
for val in series:
answer += val
return answer / len(_lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 |
'''simple docstring'''
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class UpperCAmelCase_ ( _a ):
"""simple docstring"""
lowercase = CustomTokenizer
pass
| 35 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
a__ : Optional[int] = 'https://www.indeed.co.in/jobs?q=mobile+app+development&l='
def _lowercase ( __A = "mumbai" ):
'''simple docstring'''
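    # Fetch the Indeed results page for the given location and yield (job_title, company_name) pairs.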
__UpperCamelCase = BeautifulSoup(requests.get(url + location ).content ,"""html.parser""" )
    # Each div with data-tn-component="organicJob" contains the details of one job listing
for job in soup.find_all("""div""" ,attrs={"""data-tn-component""": """organicJob"""} ):
__UpperCamelCase = job.find("""a""" ,attrs={"""data-tn-element""": """jobTitle"""} ).text.strip()
__UpperCamelCase = job.find("""span""" ,{"""class""": """company"""} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('Bangalore'), 1):
print(f'''Job {i:>2} is {job[0]} at {job[1]}''')
| 243 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCAmelCase__ ( unittest.TestCase):
def __lowerCamelCase ( self ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __lowerCamelCase ( self ) -> Any:
torch.manual_seed(0 )
__UpperCamelCase = UNetaDModel(
sample_size=(3_2, 6_4) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") , )
return model
@property
def __lowerCamelCase ( self ) -> List[Any]:
torch.manual_seed(0 )
__UpperCamelCase = UNetaDConditionModel(
sample_size=(6_4, 3_2) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , cross_attention_dim=1_0 , )
return model
@property
def __lowerCamelCase ( self ) -> Optional[Any]:
torch.manual_seed(0 )
__UpperCamelCase = AutoencoderKL(
sample_size=(1_2_8, 6_4) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=("""DownEncoderBlock2D""", """DownEncoderBlock2D""") , up_block_types=("""UpDecoderBlock2D""", """UpDecoderBlock2D""") , )
__UpperCamelCase = UNetaDModel(
sample_size=(6_4, 3_2) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") , )
return vqvae, unet
@slow
def __lowerCamelCase ( self ) -> Any:
__UpperCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
__UpperCamelCase = DDPMScheduler()
__UpperCamelCase = AudioDiffusionPipeline(vqvae=lowercase , unet=self.dummy_unet , mel=lowercase , scheduler=lowercase )
__UpperCamelCase = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(4_2 )
__UpperCamelCase = pipe(generator=lowercase , steps=4 )
__UpperCamelCase = output.audios[0]
__UpperCamelCase = output.images[0]
__UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(4_2 )
__UpperCamelCase = pipe(generator=lowercase , steps=4 , return_dict=lowercase )
__UpperCamelCase = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
__UpperCamelCase = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:1_0]
__UpperCamelCase = np.frombuffer(image_from_tuple.tobytes() , dtype="""uint8""" )[:1_0]
__UpperCamelCase = np.array([6_9, 2_5_5, 2_5_5, 2_5_5, 0, 0, 7_7, 1_8_1, 1_2, 1_2_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
__UpperCamelCase = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
__UpperCamelCase = DDIMScheduler()
__UpperCamelCase = self.dummy_vqvae_and_unet
__UpperCamelCase = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=lowercase , scheduler=lowercase )
__UpperCamelCase = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
np.random.seed(0 )
__UpperCamelCase = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
__UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(4_2 )
__UpperCamelCase = pipe(raw_audio=lowercase , generator=lowercase , start_step=5 , steps=1_0 )
__UpperCamelCase = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
__UpperCamelCase = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:1_0]
__UpperCamelCase = np.array([1_2_0, 1_1_7, 1_1_0, 1_0_9, 1_3_8, 1_6_7, 1_3_8, 1_4_8, 1_3_2, 1_2_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
__UpperCamelCase = self.dummy_unet_condition
__UpperCamelCase = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=lowercase , mel=lowercase , scheduler=lowercase )
__UpperCamelCase = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
np.random.seed(0 )
__UpperCamelCase = torch.rand((1, 1, 1_0) )
__UpperCamelCase = pipe(generator=lowercase , encoding=lowercase )
__UpperCamelCase = output.images[0]
__UpperCamelCase = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:1_0]
__UpperCamelCase = np.array([1_0_7, 1_0_3, 1_2_0, 1_2_7, 1_4_2, 1_2_2, 1_1_3, 1_2_2, 9_7, 1_1_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase):
def __lowerCamelCase ( self ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ) -> str:
__UpperCamelCase = torch_device
__UpperCamelCase = DiffusionPipeline.from_pretrained("""teticio/audio-diffusion-ddim-256""" )
__UpperCamelCase = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(4_2 )
__UpperCamelCase = pipe(generator=lowercase )
__UpperCamelCase = output.audios[0]
__UpperCamelCase = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
__UpperCamelCase = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:1_0]
__UpperCamelCase = np.array([1_5_1, 1_6_7, 1_5_4, 1_4_4, 1_2_2, 1_3_4, 1_2_1, 1_0_5, 7_0, 2_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 243 | 1 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
__A : List[str] = logging.get_logger(__name__)
@dataclass
class _UpperCAmelCase ( _A ):
SCREAMING_SNAKE_CASE_ : Any = [
"no_inference",
"no_cuda",
"no_tpu",
"no_speed",
"no_memory",
"no_env_print",
"no_multi_process",
]
def __init__( self : str , **A : List[Any] ) -> Union[str, Any]:
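        # Map each deprecated "no_*" flag onto its positive counterpart and warn the caller.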
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
lowercase_ : List[str] = deprecated_arg[3:]
setattr(self , A , not kwargs.pop(A ) )
logger.warning(
F'''{deprecated_arg} is depreciated. Please use --no_{positive_arg} or'''
F''' {positive_arg}={kwargs[positive_arg]}''' )
lowercase_ : Optional[Any] = kwargs.pop('''torchscript''' , self.torchscript )
lowercase_ : Tuple = kwargs.pop('''torch_xla_tpu_print_metrics''' , self.torch_xla_tpu_print_metrics )
lowercase_ : Any = kwargs.pop('''fp16_opt_level''' , self.fpaa_opt_level )
super().__init__(**A )
SCREAMING_SNAKE_CASE_ : bool = field(default=_A , metadata={"help": "Trace the models using torchscript"} )
SCREAMING_SNAKE_CASE_ : bool = field(default=_A , metadata={"help": "Print Xla/PyTorch tpu metrics"} )
SCREAMING_SNAKE_CASE_ : str = field(
default="O1" , metadata={
"help": (
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
"See details at https://nvidia.github.io/apex/amp.html"
)
} , )
@cached_property
def A ( self : Optional[Any] ) -> Tuple["torch.device", int]:
requires_backends(self , ['''torch'''] )
logger.info('''PyTorch: setting up devices''' )
if not self.cuda:
lowercase_ : List[Any] = torch.device('''cpu''' )
lowercase_ : Optional[int] = 0
elif is_torch_tpu_available():
lowercase_ : Optional[Any] = xm.xla_device()
lowercase_ : Union[str, Any] = 0
else:
lowercase_ : Tuple = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
lowercase_ : str = torch.cuda.device_count()
return device, n_gpu
@property
def A ( self : Any ) -> str:
return is_torch_tpu_available() and self.tpu
@property
def A ( self : str ) -> int:
requires_backends(self , ['''torch'''] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def A ( self : int ) -> "torch.device":
requires_backends(self , ['''torch'''] )
return self._setup_devices[0]
@property
def A ( self : List[str] ) -> str:
requires_backends(self , ['''torch'''] )
return self._setup_devices[1]
@property
def A ( self : List[str] ) -> Optional[Any]:
return self.n_gpu > 0
| 33 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase = {
'''configuration_bigbird_pegasus''': [
'''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BigBirdPegasusConfig''',
'''BigBirdPegasusOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BigBirdPegasusForCausalLM''',
'''BigBirdPegasusForConditionalGeneration''',
'''BigBirdPegasusForQuestionAnswering''',
'''BigBirdPegasusForSequenceClassification''',
'''BigBirdPegasusModel''',
'''BigBirdPegasusPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 89 | 0 |
'''simple docstring'''
from __future__ import annotations
lowercase : Union[str, Any] = '#'
class __UpperCAmelCase :
def __init__( self ):
"""simple docstring"""
_snake_case = {}
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = self._trie
for char in text:
if char not in trie:
_snake_case = {}
_snake_case = trie[char]
_snake_case = True
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = self._trie
for char in prefix:
if char in trie:
_snake_case = trie[char]
else:
return []
return self._elements(_SCREAMING_SNAKE_CASE )
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = []
for c, v in d.items():
_snake_case = [' '] if c == END else [(c + s) for s in self._elements(_SCREAMING_SNAKE_CASE )]
result.extend(_SCREAMING_SNAKE_CASE )
return tuple(_SCREAMING_SNAKE_CASE )
lowercase : Dict = Trie()
lowercase : Optional[Any] = ('depart', 'detergent', 'daring', 'dog', 'deer', 'deal')
for word in words:
trie.insert_word(word)
def lowercase_ ( __A ) -> tuple:
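    # Return every completion of the given prefix found in the trie, joined with the prefix.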
_snake_case = trie.find_word(_UpperCamelCase )
return tuple(string + word for word in suffixes )
def lowercase_ ( ) -> None:
print(autocomplete_using_trie('de' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 368 |
'''simple docstring'''
from __future__ import annotations
import math
import random
from typing import Any
class __UpperCAmelCase :
def __init__( self ):
"""simple docstring"""
_snake_case = []
_snake_case = 0
_snake_case = 0
def lowerCamelCase ( self ):
"""simple docstring"""
return self.head == self.tail
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
self.data.append(lowerCAmelCase_ )
_snake_case = self.tail + 1
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.data[self.head]
_snake_case = self.head + 1
return ret
def lowerCamelCase ( self ):
"""simple docstring"""
return self.tail - self.head
def lowerCamelCase ( self ):
"""simple docstring"""
print(self.data )
print('**************' )
print(self.data[self.head : self.tail] )
class __UpperCAmelCase :
def __init__( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = data
_snake_case = None
_snake_case = None
_snake_case = 1
def lowerCamelCase ( self ):
"""simple docstring"""
return self.data
def lowerCamelCase ( self ):
"""simple docstring"""
return self.left
def lowerCamelCase ( self ):
"""simple docstring"""
return self.right
def lowerCamelCase ( self ):
"""simple docstring"""
return self.height
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = data
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = node
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = node
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = height
def SCREAMING_SNAKE_CASE__ ( __A ) -> int:
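    # Height of a (possibly empty) subtree; an empty node counts as 0.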
if node is None:
return 0
return node.get_height()
def SCREAMING_SNAKE_CASE__ ( __A , __A ) -> int:
if a > b:
return a
return b
def SCREAMING_SNAKE_CASE__ ( __A ) -> MyNode:
print('left rotation node:' , node.get_data() )
_snake_case = node.get_left()
assert ret is not None
node.set_left(ret.get_right() )
ret.set_right(__A )
_snake_case = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(__A )
_snake_case = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(__A )
return ret
def SCREAMING_SNAKE_CASE__ ( __A ) -> MyNode:
print('right rotation node:' , node.get_data() )
_snake_case = node.get_right()
assert ret is not None
node.set_right(ret.get_left() )
ret.set_left(__A )
_snake_case = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(__A )
_snake_case = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(__A )
return ret
def SCREAMING_SNAKE_CASE__ ( __A ) -> MyNode:
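    # Left-Right case: rotate the left child to the left, then rotate this node to the right.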
_snake_case = node.get_left()
assert left_child is not None
node.set_left(left_rotation(__A ) )
return right_rotation(__A )
def SCREAMING_SNAKE_CASE__ ( __A ) -> MyNode:
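    # Right-Left case: rotate the right child to the right, then rotate this node to the left.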
_snake_case = node.get_right()
assert right_child is not None
node.set_right(right_rotation(__A ) )
return left_rotation(__A )
def SCREAMING_SNAKE_CASE__ ( __A , __A ) -> MyNode | None:
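    # Recursive BST insert; when a subtree's height difference reaches 2, rebalance it with the matching rotation.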
if node is None:
return MyNode(__A )
if data < node.get_data():
node.set_left(insert_node(node.get_left() , __A ) )
if (
get_height(node.get_left() ) - get_height(node.get_right() ) == 2
): # an unbalance detected
_snake_case = node.get_left()
assert left_child is not None
if (
data < left_child.get_data()
): # new node is the left child of the left child
_snake_case = right_rotation(__A )
else:
_snake_case = lr_rotation(__A )
else:
node.set_right(insert_node(node.get_right() , __A ) )
if get_height(node.get_right() ) - get_height(node.get_left() ) == 2:
_snake_case = node.get_right()
assert right_child is not None
if data < right_child.get_data():
_snake_case = rl_rotation(__A )
else:
_snake_case = left_rotation(__A )
_snake_case = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(__A )
return node
def SCREAMING_SNAKE_CASE__ ( __A ) -> Any:
while True:
_snake_case = root.get_right()
if right_child is None:
break
_snake_case = right_child
return root.get_data()
def SCREAMING_SNAKE_CASE__ ( __A ) -> Any:
while True:
_snake_case = root.get_left()
if left_child is None:
break
_snake_case = left_child
return root.get_data()
def SCREAMING_SNAKE_CASE__ ( __A , __A ) -> MyNode | None:
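    # Recursively delete data; a node with two children is replaced by its left subtree's maximum, then the tree is rebalanced.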
_snake_case = root.get_left()
_snake_case = root.get_right()
if root.get_data() == data:
if left_child is not None and right_child is not None:
_snake_case = get_left_most(__A )
root.set_data(__A )
root.set_right(del_node(__A , __A ) )
elif left_child is not None:
_snake_case = left_child
elif right_child is not None:
_snake_case = right_child
else:
return None
elif root.get_data() > data:
if left_child is None:
print('No such data' )
return root
else:
root.set_left(del_node(__A , __A ) )
else: # root.get_data() < data
if right_child is None:
return root
else:
root.set_right(del_node(__A , __A ) )
if get_height(__A ) - get_height(__A ) == 2:
assert right_child is not None
if get_height(right_child.get_right() ) > get_height(right_child.get_left() ):
_snake_case = left_rotation(__A )
else:
_snake_case = rl_rotation(__A )
elif get_height(__A ) - get_height(__A ) == -2:
assert left_child is not None
if get_height(left_child.get_left() ) > get_height(left_child.get_right() ):
_snake_case = right_rotation(__A )
else:
_snake_case = lr_rotation(__A )
_snake_case = my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1
root.set_height(__A )
return root
class __UpperCAmelCase :
def __init__( self ):
"""simple docstring"""
_snake_case = None
def lowerCamelCase ( self ):
"""simple docstring"""
return get_height(self.root )
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
print('insert:' + str(lowerCAmelCase_ ) )
_snake_case = insert_node(self.root , lowerCAmelCase_ )
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
print('delete:' + str(lowerCAmelCase_ ) )
if self.root is None:
print('Tree is empty!' )
return
_snake_case = del_node(self.root , lowerCAmelCase_ )
    def __str__( self , ): # a level-order traversal gives a more intuitive look at the tree
"""simple docstring"""
_snake_case = ''
_snake_case = MyQueue()
q.push(self.root )
_snake_case = self.get_height()
if layer == 0:
return output
_snake_case = 0
while not q.is_empty():
_snake_case = q.pop()
_snake_case = ' ' * int(math.pow(2 , layer - 1 ) )
output += space
if node is None:
output += "*"
q.push(lowerCAmelCase_ )
q.push(lowerCAmelCase_ )
else:
output += str(node.get_data() )
q.push(node.get_left() )
q.push(node.get_right() )
output += space
_snake_case = cnt + 1
for i in range(1_00 ):
if cnt == math.pow(2 , lowerCAmelCase_ ) - 1:
_snake_case = layer - 1
if layer == 0:
output += "\n*************************************"
return output
output += "\n"
break
output += "\n*************************************"
return output
def SCREAMING_SNAKE_CASE__ ( ) -> None:
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
lowercase : List[Any] = AVLtree()
lowercase : Dict = list(range(10))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
| 160 | 0 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class a :
def __init__( self , __magic_name__ , __magic_name__=2 , __magic_name__=True , __magic_name__=False , __magic_name__=10 , __magic_name__=3 , __magic_name__=32 * 4 , __magic_name__=32 * 6 , __magic_name__=4 , __magic_name__=32 , ) -> str:
_a = parent
_a = batch_size
_a = is_training
_a = use_auxiliary_loss
_a = num_queries
_a = num_channels
_a = min_size
_a = max_size
_a = num_labels
_a = mask_feature_size
def __UpperCAmelCase ( self ) -> str:
_a = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__magic_name__ )
_a = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__magic_name__ )
_a = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__magic_name__ ) > 0.5
).float()
_a = (torch.rand((self.batch_size, self.num_labels) , device=__magic_name__ ) > 0.5).long()
_a = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def __UpperCAmelCase ( self ) -> int:
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_28 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
_a , _a , _a , _a , _a = self.prepare_config_and_inputs()
_a = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ ) -> Union[str, Any]:
_a = output.encoder_hidden_states
_a = output.pixel_decoder_hidden_states
_a = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(__magic_name__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__magic_name__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__magic_name__ ) , config.decoder_config.decoder_layers )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=False ) -> Union[str, Any]:
with torch.no_grad():
_a = MaskFormerModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
_a = model(pixel_values=__magic_name__ , pixel_mask=__magic_name__ )
_a = model(__magic_name__ , output_hidden_states=__magic_name__ )
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
        # encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(__magic_name__ , __magic_name__ )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Union[str, Any]:
_a = MaskFormerForInstanceSegmentation(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
def comm_check_on_output(__magic_name__ ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_a = model(pixel_values=__magic_name__ , pixel_mask=__magic_name__ )
_a = model(__magic_name__ )
comm_check_on_output(__magic_name__ )
_a = model(
pixel_values=__magic_name__ , pixel_mask=__magic_name__ , mask_labels=__magic_name__ , class_labels=__magic_name__ )
comm_check_on_output(__magic_name__ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
_lowerCAmelCase = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
_lowerCAmelCase = (
{"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def __UpperCAmelCase ( self ) -> Union[str, Any]:
_a = MaskFormerModelTester(self )
_a = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ )
def __UpperCAmelCase ( self ) -> Any:
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self ) -> str:
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__magic_name__ , **__magic_name__ , output_hidden_states=__magic_name__ )
def __UpperCAmelCase ( self ) -> int:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__magic_name__ )
@unittest.skip(reason='MaskFormer does not use inputs_embeds' )
def __UpperCAmelCase ( self ) -> int:
pass
@unittest.skip(reason='MaskFormer does not have a get_input_embeddings method' )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
pass
@unittest.skip(reason='MaskFormer is not a generative model' )
def __UpperCAmelCase ( self ) -> Dict:
pass
@unittest.skip(reason='MaskFormer does not use token embeddings' )
def __UpperCAmelCase ( self ) -> Dict:
pass
@require_torch_multi_gpu
@unittest.skip(
reason='MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def __UpperCAmelCase ( self ) -> Optional[int]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __UpperCAmelCase ( self ) -> Any:
pass
def __UpperCAmelCase ( self ) -> Union[str, Any]:
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = model_class(__magic_name__ )
_a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a = [*signature.parameters.keys()]
_a = ['pixel_values']
self.assertListEqual(arg_names[:1] , __magic_name__ )
@slow
def __UpperCAmelCase ( self ) -> Tuple:
for model_name in ["facebook/maskformer-swin-small-coco"]:
_a = MaskFormerModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def __UpperCAmelCase ( self ) -> Dict:
_a = (self.model_tester.min_size,) * 2
_a = {
'pixel_values': torch.randn((2, 3, *size) , device=__magic_name__ ),
'mask_labels': torch.randn((2, 10, *size) , device=__magic_name__ ),
'class_labels': torch.zeros(2 , 10 , device=__magic_name__ ).long(),
}
_a = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__magic_name__ )
_a = model(**__magic_name__ )
self.assertTrue(outputs.loss is not None )
def __UpperCAmelCase ( self ) -> int:
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__magic_name__ , **__magic_name__ , output_hidden_states=__magic_name__ )
def __UpperCAmelCase ( self ) -> Tuple:
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = model_class(__magic_name__ ).to(__magic_name__ )
_a = model(**__magic_name__ , output_attentions=__magic_name__ )
self.assertTrue(outputs.attentions is not None )
def __UpperCAmelCase ( self ) -> str:
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
_a = self.all_model_classes[1]
_a , _a , _a , _a , _a = self.model_tester.prepare_config_and_inputs()
_a = model_class(__magic_name__ )
model.to(__magic_name__ )
model.train()
_a = model(__magic_name__ , mask_labels=__magic_name__ , class_labels=__magic_name__ ).loss
loss.backward()
def __UpperCAmelCase ( self ) -> List[str]:
# only MaskFormerForInstanceSegmentation has the loss
_a = self.all_model_classes[1]
_a , _a , _a , _a , _a = self.model_tester.prepare_config_and_inputs()
_a = True
_a = True
_a = model_class(__magic_name__ )
model.to(__magic_name__ )
model.train()
_a = model(__magic_name__ , mask_labels=__magic_name__ , class_labels=__magic_name__ )
_a = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_a = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
_a = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_a = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__magic_name__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
a_ : Tuple = 1E-4
def _A () -> Any:
'''simple docstring'''
_a = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class a ( unittest.TestCase ):
@cached_property
def __UpperCAmelCase ( self ) -> str:
return (
MaskFormerImageProcessor.from_pretrained('facebook/maskformer-swin-small-coco' )
if is_vision_available()
else None
)
def __UpperCAmelCase ( self ) -> str:
_a = MaskFormerModel.from_pretrained('facebook/maskformer-swin-small-coco' ).to(__magic_name__ )
_a = self.default_image_processor
_a = prepare_img()
_a = image_processor(__magic_name__ , return_tensors='pt' ).to(__magic_name__ )
_a = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__magic_name__ , (1, 3, 8_00, 10_88) )
with torch.no_grad():
_a = model(**__magic_name__ )
_a = torch.tensor(
[[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]] ).to(__magic_name__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , __magic_name__ , atol=__magic_name__ ) )
_a = torch.tensor(
[[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]] ).to(__magic_name__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __magic_name__ , atol=__magic_name__ ) )
_a = torch.tensor(
[[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]] ).to(__magic_name__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __magic_name__ , atol=__magic_name__ ) )
def __UpperCAmelCase ( self ) -> List[str]:
_a = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco' )
.to(__magic_name__ )
.eval()
)
_a = self.default_image_processor
_a = prepare_img()
_a = image_processor(__magic_name__ , return_tensors='pt' ).to(__magic_name__ )
_a = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__magic_name__ , (1, 3, 8_00, 10_88) )
with torch.no_grad():
_a = model(**__magic_name__ )
# masks_queries_logits
_a = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_a = [
[-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3],
[-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5],
[-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2],
]
_a = torch.tensor(__magic_name__ ).to(__magic_name__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __magic_name__ , atol=__magic_name__ ) )
# class_queries_logits
_a = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_a = torch.tensor(
[
[1.6512e00, -5.2572e00, -3.3519e00],
[3.6169e-02, -5.9025e00, -2.9313e00],
[1.0766e-04, -7.7630e00, -5.1263e00],
] ).to(__magic_name__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __magic_name__ , atol=__magic_name__ ) )
def __UpperCAmelCase ( self ) -> List[Any]:
_a = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-resnet101-coco-stuff' )
.to(__magic_name__ )
.eval()
)
_a = self.default_image_processor
_a = prepare_img()
_a = image_processor(__magic_name__ , return_tensors='pt' ).to(__magic_name__ )
_a = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__magic_name__ , (1, 3, 8_00, 10_88) )
with torch.no_grad():
_a = model(**__magic_name__ )
# masks_queries_logits
_a = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_a = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]]
_a = torch.tensor(__magic_name__ ).to(__magic_name__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __magic_name__ , atol=__magic_name__ ) )
# class_queries_logits
_a = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_a = torch.tensor(
[[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]] ).to(__magic_name__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __magic_name__ , atol=__magic_name__ ) )
def __UpperCAmelCase ( self ) -> Any:
_a = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco' )
.to(__magic_name__ )
.eval()
)
_a = self.default_image_processor
_a = image_processor(
[np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors='pt' , )
_a = inputs['pixel_values'].to(__magic_name__ )
_a = [el.to(__magic_name__ ) for el in inputs['mask_labels']]
_a = [el.to(__magic_name__ ) for el in inputs['class_labels']]
with torch.no_grad():
_a = model(**__magic_name__ )
self.assertTrue(outputs.loss is not None )
| 168 |
'''simple docstring'''
import itertools
import math
def _A (lowerCAmelCase__ :int ) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not prime
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCAmelCase__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def _A () -> List[str]:
'''simple docstring'''
_a = 2
while True:
if is_prime(lowerCAmelCase__ ):
yield num
num += 1
def _A (lowerCAmelCase__ :int = 1_00_01 ) -> int:
'''simple docstring'''
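    # Return the nth prime by slicing the lazy prime generator (n defaults to 10001, as in Project Euler problem 7).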
return next(itertools.islice(prime_generator() , nth - 1 , lowerCAmelCase__ ) )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 168 | 1 |
'''simple docstring'''
def a__ ( a__ = 10_00 ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = -1
__SCREAMING_SNAKE_CASE = 0
for a in range(1 , n // 3 ):
        # Solve a**2 + b**2 == c**2 and a + b + c == n simultaneously, eliminating c
__SCREAMING_SNAKE_CASE = (n * n - 2 * a * n) // (2 * n - 2 * a)
__SCREAMING_SNAKE_CASE = n - a - b
if c * c == (a * a + b * b):
__SCREAMING_SNAKE_CASE = a * b * c
if candidate >= product:
__SCREAMING_SNAKE_CASE = candidate
return product
if __name__ == "__main__":
print(f"""{solution() = }""")
| 331 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
UpperCAmelCase : List[str] = datasets.utils.logging.get_logger(__name__)
@dataclass
class lowerCAmelCase__ ( datasets.BuilderConfig ):
"""simple docstring"""
lowerCAmelCase__ = 10000
lowerCAmelCase__ = None
lowerCAmelCase__ = None
class lowerCAmelCase__ ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
lowerCAmelCase__ = ParquetConfig
def UpperCAmelCase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def UpperCAmelCase__ ( self : str , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Tuple:
"""simple docstring"""
if not self.config.data_files:
raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' )
__SCREAMING_SNAKE_CASE = dl_manager.download_and_extract(self.config.data_files )
if isinstance(__SCREAMING_SNAKE_CASE , (str, list, tuple) ):
__SCREAMING_SNAKE_CASE = data_files
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__SCREAMING_SNAKE_CASE = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__SCREAMING_SNAKE_CASE = [dl_manager.iter_files(__SCREAMING_SNAKE_CASE ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
__SCREAMING_SNAKE_CASE = []
for split_name, files in data_files.items():
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__SCREAMING_SNAKE_CASE = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__SCREAMING_SNAKE_CASE = [dl_manager.iter_files(__SCREAMING_SNAKE_CASE ) for file in files]
            # Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(__SCREAMING_SNAKE_CASE ):
with open(__SCREAMING_SNAKE_CASE , """rb""" ) as f:
__SCREAMING_SNAKE_CASE = datasets.Features.from_arrow_schema(pq.read_schema(__SCREAMING_SNAKE_CASE ) )
break
splits.append(datasets.SplitGenerator(name=__SCREAMING_SNAKE_CASE , gen_kwargs={"""files""": files} ) )
return splits
def UpperCAmelCase__ ( self : Optional[int] , __SCREAMING_SNAKE_CASE : pa.Table ) -> pa.Table:
"""simple docstring"""
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
__SCREAMING_SNAKE_CASE = table_cast(__SCREAMING_SNAKE_CASE , self.info.features.arrow_schema )
return pa_table
def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : int ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
f'Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'' )
for file_idx, file in enumerate(itertools.chain.from_iterable(__SCREAMING_SNAKE_CASE ) ):
with open(__SCREAMING_SNAKE_CASE , """rb""" ) as f:
__SCREAMING_SNAKE_CASE = pq.ParquetFile(__SCREAMING_SNAKE_CASE )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
__SCREAMING_SNAKE_CASE = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f'{file_idx}_{batch_idx}', self._cast_table(__SCREAMING_SNAKE_CASE )
except ValueError as e:
logger.error(f'Failed to read file \'{file}\' with error {type(__SCREAMING_SNAKE_CASE )}: {e}' )
raise
| 331 | 1 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
SCREAMING_SNAKE_CASE__ : Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Tuple = {'vocab_file': 'spiece.model'}
SCREAMING_SNAKE_CASE__ : int = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
}
}
SCREAMING_SNAKE_CASE__ : str = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
# Segments (not really needed)
SCREAMING_SNAKE_CASE__ : Dict = 0
SCREAMING_SNAKE_CASE__ : Tuple = 1
SCREAMING_SNAKE_CASE__ : Optional[int] = 2
SCREAMING_SNAKE_CASE__ : List[str] = 3
SCREAMING_SNAKE_CASE__ : Optional[int] = 4
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : Dict = VOCAB_FILES_NAMES
lowerCamelCase_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ : List[str] = """left"""
def __init__( self , UpperCamelCase__ , UpperCamelCase__=False , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__="<s>" , UpperCamelCase__="</s>" , UpperCamelCase__="<unk>" , UpperCamelCase__="<sep>" , UpperCamelCase__="<pad>" , UpperCamelCase__="<cls>" , UpperCamelCase__="<mask>" , UpperCamelCase__=["<eop>", "<eod>"] , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> None:
        # The mask token behaves like a normal word, i.e. it includes the space before it
lowerCamelCase : str = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token
lowerCamelCase : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCamelCase__ , remove_space=UpperCamelCase__ , keep_accents=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , )
lowerCamelCase : Any = 3
lowerCamelCase : Optional[Any] = do_lower_case
lowerCamelCase : List[Any] = remove_space
lowerCamelCase : str = keep_accents
lowerCamelCase : List[Any] = vocab_file
lowerCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase__ )
@property
def _lowercase ( self ) -> Optional[Any]:
return len(self.sp_model )
def _lowercase ( self ) -> Optional[int]:
lowerCamelCase : int = {self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Optional[Any]:
lowerCamelCase : Optional[int] = self.__dict__.copy()
lowerCamelCase : Union[str, Any] = None
return state
def __setstate__( self , UpperCamelCase__ ) -> int:
lowerCamelCase : int = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowerCamelCase : Any = {}
lowerCamelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowercase ( self , UpperCamelCase__ ) -> Any:
if self.remove_space:
lowerCamelCase : Dict = " ".join(inputs.strip().split() )
else:
lowerCamelCase : Union[str, Any] = inputs
lowerCamelCase : Optional[Any] = outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
lowerCamelCase : Optional[int] = unicodedata.normalize("NFKD" , UpperCamelCase__ )
lowerCamelCase : List[Any] = "".join([c for c in outputs if not unicodedata.combining(UpperCamelCase__ )] )
if self.do_lower_case:
lowerCamelCase : List[str] = outputs.lower()
return outputs
def _lowercase ( self , UpperCamelCase__ ) -> List[str]:
lowerCamelCase : Optional[Any] = self.preprocess_text(UpperCamelCase__ )
lowerCamelCase : Dict = self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__ )
lowerCamelCase : Dict = []
for piece in pieces:
if len(UpperCamelCase__ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
lowerCamelCase : List[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCamelCase__ , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowerCamelCase : Union[str, Any] = cur_pieces[1:]
else:
lowerCamelCase : Optional[int] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(UpperCamelCase__ )
else:
new_pieces.append(UpperCamelCase__ )
return new_pieces
def _lowercase ( self , UpperCamelCase__ ) -> int:
return self.sp_model.PieceToId(UpperCamelCase__ )
def _lowercase ( self , UpperCamelCase__ ) -> Tuple:
return self.sp_model.IdToPiece(UpperCamelCase__ )
def _lowercase ( self , UpperCamelCase__ ) -> List[str]:
lowerCamelCase : Union[str, Any] = "".join(UpperCamelCase__ ).replace(UpperCamelCase__ , " " ).strip()
return out_string
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = False , UpperCamelCase__ = None , UpperCamelCase__ = True , **UpperCamelCase__ , ) -> str:
lowerCamelCase : Optional[int] = kwargs.pop("use_source_tokenizer" , UpperCamelCase__ )
lowerCamelCase : Optional[int] = self.convert_ids_to_tokens(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
        # To avoid mixing byte-level and unicode for byte-level BPE,
        # we need to build the string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
lowerCamelCase : Any = []
lowerCamelCase : Any = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(UpperCamelCase__ ) )
lowerCamelCase : int = []
sub_texts.append(UpperCamelCase__ )
else:
current_sub_text.append(UpperCamelCase__ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(UpperCamelCase__ ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
lowerCamelCase : Union[str, Any] = "".join(UpperCamelCase__ )
lowerCamelCase : Tuple = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
lowerCamelCase : int = self.clean_up_tokenization(UpperCamelCase__ )
return clean_text
else:
return text
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]:
lowerCamelCase : str = [self.sep_token_id]
lowerCamelCase : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ )
if token_ids_a is not None:
return ([0] * len(UpperCamelCase__ )) + [1] + ([0] * len(UpperCamelCase__ )) + [1, 1]
return ([0] * len(UpperCamelCase__ )) + [1, 1]
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]:
lowerCamelCase : Any = [self.sep_token_id]
lowerCamelCase : List[str] = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Tuple[str]:
if not os.path.isdir(UpperCamelCase__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase : Union[str, Any] = os.path.join(
UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase__ , "wb" ) as fi:
lowerCamelCase : str = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase__ )
return (out_vocab_file,)
| 48 |
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,) -> List[str]:
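    # Merge a question-encoder checkpoint and a generator checkpoint into a single RAG checkpoint saved under dest_dir.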
if config_name_or_path is None:
lowerCamelCase : Any = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
if generator_tokenizer_name_or_path is None:
lowerCamelCase : Dict = generator_name_or_path
if question_encoder_tokenizer_name_or_path is None:
lowerCamelCase : Any = question_encoder_name_or_path
lowerCamelCase : str = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
# Save model.
lowerCamelCase : List[Any] = RagConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Optional[int] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Optional[Any] = gen_config
lowerCamelCase : Optional[Any] = question_encoder_config
lowerCamelCase : List[Any] = model_class.from_pretrained_question_encoder_generator(
_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,config=_SCREAMING_SNAKE_CASE )
rag_model.save_pretrained(_SCREAMING_SNAKE_CASE )
# Sanity check.
model_class.from_pretrained(_SCREAMING_SNAKE_CASE )
# Save tokenizers.
lowerCamelCase : List[str] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/" )
lowerCamelCase : int = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Any = argparse.ArgumentParser()
parser.add_argument(
'--model_type',
choices=['rag_sequence', 'rag_token'],
required=True,
type=str,
help='RAG model type: rag_sequence, rag_token',
)
parser.add_argument('--dest', type=str, required=True, help='Path to the output checkpoint directory.')
parser.add_argument('--generator_name_or_path', type=str, required=True, help='Generator model identifier')
parser.add_argument(
'--question_encoder_name_or_path', type=str, required=True, help='Question encoder model identifier'
)
parser.add_argument(
'--generator_tokenizer_name_or_path',
type=str,
help='Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``',
)
parser.add_argument(
'--question_encoder_tokenizer_name_or_path',
type=str,
help='Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``',
)
parser.add_argument(
'--config_name_or_path',
type=str,
help=(
'Identifier of the model config to use, if not provided, resolves to a base config for a given'
' ``model_type``'
),
)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args()
SCREAMING_SNAKE_CASE__ : Optional[Any] = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
| 48 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }

        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
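
# A minimal sketch of what these tests exercise (assumes only the imports above):
# the generic helpers dispatch on the framework of their input, so the same call
# site covers NumPy, PyTorch, TensorFlow and JAX tensors alike.
if __name__ == "__main__":
    sample = np.random.randn(3, 4)
    print(transpose(sample).shape)  # (4, 3) via the NumPy code path
    print(reshape(sample, (4, 3)).shape)  # (4, 3)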
| 364 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    """Build the (old key, new key) pairs mapping MSN checkpoint names to HF ViT names."""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''module.blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''module.blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
('''module.cls_token''', '''vit.embeddings.cls_token'''),
('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''module.pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''module.norm.weight''', '''layernorm.weight'''),
('''module.norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each fused qkv projection into separate query/key/value entries."""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    """Drop classification-head weights that are not used by the base model."""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    """Drop the MSN projection head, which is only used during self-supervised pre-training."""
    ignore_keys = [
'''module.fc.fc1.weight''',
'''module.fc.fc1.bias''',
'''module.fc.bn1.weight''',
'''module.fc.bn1.bias''',
'''module.fc.bn1.running_mean''',
'''module.fc.bn1.running_var''',
'''module.fc.bn1.num_batches_tracked''',
'''module.fc.fc2.weight''',
'''module.fc.fc2.bias''',
'''module.fc.bn2.weight''',
'''module.fc.bn2.bias''',
'''module.fc.bn2.running_mean''',
'''module.fc.bn2.running_var''',
'''module.fc.bn2.num_batches_tracked''',
'''module.fc.fc3.weight''',
'''module.fc.fc3.bias''',
]
for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    """Move the value stored under key `old` to key `new` in-place."""
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
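    # Example invocation (hypothetical script name; the URL matches the default above):
    #
    #   python convert_vit_msn_to_pytorch.py \
    #       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
    #       --pytorch_dump_folder_path ./vit-msn-small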
| 213 | 0 |
from ..utils import DummyObject, requires_backends
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Dict = ["sentencepiece"]
def __init__( self: Optional[int] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: List[str] ) -> Any:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Tuple = ["sentencepiece"]
def __init__( self: Dict ,*lowerCamelCase_: int ,**lowerCamelCase_: List[str] ) -> List[Any]:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : List[str] = ["sentencepiece"]
def __init__( self: str ,*lowerCamelCase_: int ,**lowerCamelCase_: Any ) -> str:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Optional[int] = ["sentencepiece"]
def __init__( self: List[str] ,*lowerCamelCase_: int ,**lowerCamelCase_: List[Any] ) -> str:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Optional[int] = ["sentencepiece"]
def __init__( self: Optional[Any] ,*lowerCamelCase_: List[str] ,**lowerCamelCase_: Dict ) -> Any:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Any = ["sentencepiece"]
def __init__( self: Any ,*lowerCamelCase_: List[str] ,**lowerCamelCase_: str ) -> Dict:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Optional[int] = ["sentencepiece"]
def __init__( self: List[str] ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: str ) -> Dict:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : int = ["sentencepiece"]
def __init__( self: Union[str, Any] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Union[str, Any] ) -> Any:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Dict = ["sentencepiece"]
def __init__( self: Optional[int] ,*lowerCamelCase_: List[str] ,**lowerCamelCase_: Tuple ) -> Union[str, Any]:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Dict = ["sentencepiece"]
def __init__( self: Optional[int] ,*lowerCamelCase_: int ,**lowerCamelCase_: int ) -> Tuple:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : int = ["sentencepiece"]
def __init__( self: Union[str, Any] ,*lowerCamelCase_: int ,**lowerCamelCase_: List[str] ) -> Dict:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : List[Any] = ["sentencepiece"]
def __init__( self: str ,*lowerCamelCase_: List[str] ,**lowerCamelCase_: Union[str, Any] ) -> List[Any]:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : str = ["sentencepiece"]
def __init__( self: Union[str, Any] ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: Dict ) -> Dict:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Optional[Any] = ["sentencepiece"]
def __init__( self: List[Any] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: List[str] ) -> Optional[int]:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Dict = ["sentencepiece"]
def __init__( self: Tuple ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: Optional[int] ) -> List[str]:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : List[Any] = ["sentencepiece"]
def __init__( self: Optional[Any] ,*lowerCamelCase_: Any ,**lowerCamelCase_: Union[str, Any] ) -> Optional[Any]:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : str = ["sentencepiece"]
def __init__( self: List[Any] ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: List[str] ) -> Optional[int]:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Union[str, Any] = ["sentencepiece"]
def __init__( self: Optional[int] ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: Optional[Any] ) -> Tuple:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Optional[Any] = ["sentencepiece"]
def __init__( self: Tuple ,*lowerCamelCase_: List[str] ,**lowerCamelCase_: Union[str, Any] ) -> Optional[int]:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Optional[int] = ["sentencepiece"]
def __init__( self: str ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: Dict ) -> Tuple:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Dict = ["sentencepiece"]
def __init__( self: Tuple ,*lowerCamelCase_: Any ,**lowerCamelCase_: Optional[int] ) -> Dict:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : List[str] = ["sentencepiece"]
def __init__( self: List[str] ,*lowerCamelCase_: Any ,**lowerCamelCase_: Optional[int] ) -> str:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Optional[Any] = ["sentencepiece"]
def __init__( self: Union[str, Any] ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: Any ) -> Dict:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Dict = ["sentencepiece"]
def __init__( self: Optional[Any] ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: List[Any] ) -> Optional[Any]:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : int = ["sentencepiece"]
def __init__( self: Union[str, Any] ,*lowerCamelCase_: int ,**lowerCamelCase_: Union[str, Any] ) -> str:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Tuple = ["sentencepiece"]
def __init__( self: List[Any] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Tuple ) -> int:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Optional[int] = ["sentencepiece"]
def __init__( self: Tuple ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: Union[str, Any] ) -> Dict:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Optional[Any] = ["sentencepiece"]
def __init__( self: Tuple ,*lowerCamelCase_: int ,**lowerCamelCase_: List[Any] ) -> Optional[int]:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : int = ["sentencepiece"]
def __init__( self: Dict ,*lowerCamelCase_: int ,**lowerCamelCase_: int ) -> str:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : int = ["sentencepiece"]
def __init__( self: Union[str, Any] ,*lowerCamelCase_: int ,**lowerCamelCase_: Optional[int] ) -> List[str]:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : List[str] = ["sentencepiece"]
def __init__( self: Optional[Any] ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: Dict ) -> str:
requires_backends(self ,["""sentencepiece"""] )
| 345 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
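
    # Worked example of the toy merge table above: "readapt" starts as the symbols
    # r e a d a p t</w>; applying the merges in rank order gives "a p" -> ap,
    # "ap t</w>" -> apt</w>, "r e" -> re, "a d" -> ad and finally
    # "ad apt</w>" -> adapt</w>, i.e. the pieces re@@ adapt seen in the expected
    # tokenization checked by test_full_tokenizer.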
| 345 | 1 |
from __future__ import annotations
graph = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        """The graph is given as a dictionary of adjacency lists; the source
        vertex must be defined upon initialization."""
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        """Run breadth first search from the source vertex, filling self.parent."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Return the shortest path from the source to target_vertex as "a->b->c"."""
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
if __name__ == "__main__":
    g = Graph(graph, "G")
g.breath_first_search()
print(g.shortest_path("""D"""))
print(g.shortest_path("""G"""))
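    # Expected behavior of the demo above with source vertex "G":
    #   g.shortest_path("D") returns "G->C->A->B->D"
    #   g.shortest_path("G") returns "G"
    #   g.shortest_path("Foo") raises ValueError: No path from vertex: G to vertex: Foo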
print(g.shortest_path("""Foo"""))
| 269 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def get_resize_output_image_size(
    input_image: np.ndarray, output_size: Union[int, Iterable[int]], keep_aspect_ratio: bool, multiple: int
) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
class DPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image,
            output_size=(size["height"], size["width"]),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: int = None,
        keep_aspect_ratio: bool = None,
        ensure_multiple_of: int = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
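
# A short usage sketch (hedged: "Intel/dpt-large" is an illustrative checkpoint id
# and DPTForDepthEstimation comes from the same model family; neither is defined
# in this file):
#
#   from transformers import DPTImageProcessor, DPTForDepthEstimation
#   processor = DPTImageProcessor.from_pretrained("Intel/dpt-large")
#   inputs = processor(images=image, return_tensors="pt")  # image: a PIL.Image
#   outputs = DPTForDepthEstimation.from_pretrained("Intel/dpt-large")(**inputs)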
| 269 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/unispeech-large-1500h-cv""": (
"""https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"""
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
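
# Minimal usage sketch: instantiating the config with defaults and checking the
# derived property; with the default conv_stride (5, 2, 2, 2, 2, 2, 2) the model
# downsamples raw audio by a factor of 5 * 2**6 = 320.
#
#   config = UniSpeechConfig()
#   assert config.inputs_to_logits_ratio == 320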
| 303 |
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ):
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
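
# Usage sketch (hedged: "harmonai/maestro-150k" is an illustrative Dance Diffusion
# checkpoint, not referenced by this file):
#
#   pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
#   output = pipe(audio_length_in_s=4.0, num_inference_steps=100)
#   waveform = output.audios[0]  # numpy array of shape (channels, samples)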
| 303 | 1 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 285 |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = """<<<<<<< This should probably be modified because it mentions: """
HIGHLIGHT_MESSAGE_POST = """=======
>>>>>>>
"""
TO_HIGHLIGHT = [
    "TextEncoderConfig",
    "ByteTextEncoder",
    "SubwordTextEncoder",
    "encoder_config",
    "maybe_build_from_corpus",
    "manual_dir",
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(R"""tfds\.core""", R"""datasets"""),
(R"""tf\.io\.gfile\.GFile""", R"""open"""),
(R"""tf\.([\w\d]+)""", R"""datasets.Value('\1')"""),
(R"""tfds\.features\.Text\(\)""", R"""datasets.Value('string')"""),
(R"""tfds\.features\.Text\(""", R"""datasets.Value('string'),"""),
(R"""features\s*=\s*tfds.features.FeaturesDict\(""", R"""features=datasets.Features("""),
(R"""tfds\.features\.FeaturesDict\(""", R"""dict("""),
(R"""The TensorFlow Datasets Authors""", R"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""),
(R"""tfds\.""", R"""datasets."""),
(R"""dl_manager\.manual_dir""", R"""self.config.data_dir"""),
(R"""self\.builder_config""", R"""self.config"""),
]
def convert_command_factory(args: Namespace):
return ConvertCommand(args.tfds_path , args.datasets_directory )
class ConvertCommand(BaseDatasetsCLICommand):
@staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register the `convert` subcommand on the datasets CLI parser."""
        train_parser = parser.add_parser(
"""convert""" ,help="""Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.""" ,)
train_parser.add_argument(
"""--tfds_path""" ,type=snake_case ,required=snake_case ,help="""Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.""" ,)
train_parser.add_argument(
"""--datasets_directory""" ,type=snake_case ,required=snake_case ,help="""Path to the HuggingFace Datasets folder.""" )
        train_parser.set_defaults(func=convert_command_factory)
    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")

        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run(self):
'''simple docstring'''
if os.path.isdir(self._tfds_path ):
lowercase : List[str] = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
lowercase : Tuple = os.path.dirname(self._tfds_path )
else:
raise ValueError("""--tfds_path is neither a directory nor a file. Please check path.""" )
lowercase : Optional[int] = os.path.abspath(self._datasets_directory )
self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}" )
lowercase : List[Any] = []
lowercase : Optional[int] = []
lowercase : Dict = {}
if os.path.isdir(self._tfds_path ):
lowercase : int = os.listdir(snake_case )
else:
lowercase : List[Any] = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(f"Looking at file {f_name}" )
lowercase : List[Any] = os.path.join(snake_case ,snake_case )
lowercase : List[str] = os.path.join(snake_case ,snake_case )
if not os.path.isfile(snake_case ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info("""Skipping file""" )
continue
with open(snake_case ,encoding="""utf-8""" ) as f:
lowercase : str = f.readlines()
lowercase : Union[str, Any] = []
lowercase : Optional[Any] = False
lowercase : Optional[Any] = False
lowercase : Optional[int] = []
for line in lines:
lowercase : int = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
lowercase : Union[str, Any] = """import datasets\n"""
elif "import tensorflow" in out_line:
# order is important here
lowercase : List[Any] = """"""
continue
elif "from absl import logging" in out_line:
lowercase : Optional[int] = """from datasets import logging\n"""
elif "getLogger" in out_line:
lowercase : Any = out_line.replace("""getLogger""" ,"""get_logger""" )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
lowercase : Optional[Any] = True
lowercase : Optional[Any] = list(filter(lambda snake_case : e in out_line ,snake_case ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(snake_case ) + """\n""" )
out_lines.append(snake_case )
out_lines.append(snake_case )
continue
else:
for pattern, replacement in TO_CONVERT:
lowercase : Union[str, Any] = re.sub(snake_case ,snake_case ,snake_case )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
lowercase : Dict = re.match(r"""from\stensorflow_datasets.*import\s([^\.\r\n]+)""" ,snake_case )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(""",""" ) )
lowercase : Optional[int] = """from . import """ + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f"Error converting {out_line.strip()}" )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
lowercase : Any = True
out_lines.append(snake_case )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
lowercase : Union[str, Any] = f_name.replace(""".py""" ,"""""" )
lowercase : Optional[Any] = os.path.join(snake_case ,snake_case )
lowercase : List[str] = os.path.join(snake_case ,snake_case )
os.makedirs(snake_case ,exist_ok=snake_case )
self._logger.info(f"Adding directory {output_dir}" )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(snake_case )
if needs_manual_update:
with_manual_update.append(snake_case )
with open(snake_case ,"""w""" ,encoding="""utf-8""" ) as f:
f.writelines(snake_case )
self._logger.info(f"Converted in {output_file}" )
for utils_file in utils_files:
try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {utils_file} to {dest_folder}")
shutil.copy(snake_case ,snake_case )
except KeyError:
self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually." )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'." )
| 285 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''',
'''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''',
}
class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(
        self,
        vocab_size=50267,
        entity_vocab_size=500000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
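
# Minimal sketch: building the config and, with the matching model class from
# transformers (not defined in this file), a randomly initialized model:
#
#   config = LukeConfig(entity_vocab_size=10_000)
#   # from transformers import LukeModel; model = LukeModel(config)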
| 272 |
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            temp, temp1 = heap[smallest_child], positions[smallest_child]
            heap[smallest_child], positions[smallest_child] = (
                heap[start],
                positions[start],
            )
            heap[start], positions[start] = temp, temp1

            temp = self.get_position(positions[smallest_child])
            self.set_position(
                positions[smallest_child], self.get_position(positions[start])
            )
            self.set_position(positions[start], temp)

            self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
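    # Worked example (non-interactive): for the weighted triangle 0-1 (w=1),
    # 1-2 (w=2), 0-2 (w=3), the adjacency list
    #   {0: [[1, 1], [2, 3]], 1: [[0, 1], [2, 2]], 2: [[0, 3], [1, 2]]}
    # yields the minimum spanning tree edges [(0, 1), (1, 2)].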
| 303 | 0 |
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Save a randomly initialized version of a model using a pretrained config."""
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
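
# Example invocation (hypothetical file name; python-fire maps positional CLI
# arguments onto the function's parameters and --key=value onto **config_kwargs):
#
#   python save_randomly_initialized_model.py t5-small ./t5-small-random --d_ff=1024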
| 356 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_squeezebert''': [
'''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SqueezeBertConfig''',
'''SqueezeBertOnnxConfig''',
],
'''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = ['''SqueezeBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
'''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SqueezeBertForMaskedLM''',
'''SqueezeBertForMultipleChoice''',
'''SqueezeBertForQuestionAnswering''',
'''SqueezeBertForSequenceClassification''',
'''SqueezeBertForTokenClassification''',
'''SqueezeBertModel''',
'''SqueezeBertModule''',
'''SqueezeBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
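
# Illustrative note (added): with the _LazyModule wiring above, importing the
# package is cheap; submodules load on first attribute access. For example:
#
#   from transformers.models.squeezebert import SqueezeBertConfig  # no torch import yet
#   from transformers.models.squeezebert import SqueezeBertModel   # pulls in modeling code (needs torch)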
| 335 | 0 |
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    """Return True if number is prime, checking only 6k +/- 1 candidates."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    """Return n together with all of its left- and right-truncations."""
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums


def validate(n: int) -> bool:
    """Pre-filter: for numbers above 3 digits, both 3-digit ends must be prime."""
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    """Return the first `count` truncatable primes."""
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    """Return the sum of the only eleven truncatable primes."""
    return sum(compute_truncated_primes(11))


if __name__ == "__main__":
    print(f"{sum(compute_truncated_primes(11)) = }")
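
# Illustrative check (added): the eleven truncatable primes are
# 23, 37, 53, 73, 313, 317, 373, 797, 3137, 3797 and 739397, so solution()
# returns their sum, 748317. A quick sanity check on the helpers above:
assert compute_truncated_primes(4) == [23, 37, 53, 73]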
| 214 |
import logging
import re

import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor


logger = logging.getLogger(__name__)

name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names


def add_arguments(parser):
    """Add arguments to parser for functions defined in quant_trainer."""
    group = parser.add_argument_group("quant_trainer arguments")
    group.add_argument("--wprec", type=int, default=8, help="weight precision")
    group.add_argument("--aprec", type=int, default=8, help="activation precision")
    group.add_argument("--quant-per-tensor", action="store_true", help="per tensor weight scaling")
    group.add_argument("--quant-disable", action="store_true", help="disable all quantizers")
    group.add_argument("--quant-disable-embeddings", action="store_true", help="disable all embeddings quantizers")
    group.add_argument("--quant-disable-keyword", type=str, nargs="+", help="disable quantizers by keyword")
    group.add_argument("--quant-disable-layer-module", type=str, help="disable quantizers by keyword under layer.")
    group.add_argument("--quant-enable-layer-module", type=str, help="enable quantizers by keyword under layer")
    group.add_argument("--calibrator", default="max", help="which quantization range calibrator to use")
    group.add_argument("--percentile", default=None, type=float, help="percentile for PercentileCalibrator")
    group.add_argument("--fuse-qkv", action="store_true", help="use the same scale factor for qkv")
    group.add_argument("--clip-gelu", metavar="N", type=float, help="clip gelu output maximum value to N")
    group.add_argument(
        "--recalibrate-weights",
        action="store_true",
        help=(
            "recalibrate weight amaxes by taking the max of the weights."
            " amaxes will be computed with the current quantization granularity (axis)."
        ),
    )


def set_default_quantizers(args):
    """Set default quantizers before creating the model."""
    if args.calibrator == "max":
        calib_method = "max"
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("Specify --percentile when using percentile calibrator")
        calib_method = "histogram"
    elif args.calibrator == "mse":
        calib_method = "histogram"
    else:
        raise ValueError(f"Invalid calibrator {args.calibrator}")

    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)


def configure_model(model, args, calib=False, eval=False):
    """Function called before the training loop."""
    logger.info("Configuring Model for Quantization")
    logger.info(f"using quantization package {pytorch_quantization.__file__}")

    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model, ["embeddings"], which="weight", _disabled=True)
        if args.quant_disable:
            set_quantizer_by_name(model, [""], _disabled=True)
        if args.quant_disable_keyword:
            set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_disable_layer_module], _disabled=True)
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_enable_layer_module], _disabled=False)
        if args.recalibrate_weights:
            recalibrate_weights(model)
        if args.fuse_qkv:
            fuse_qkv(model, args)
        if args.clip_gelu:
            clip_gelu(model, args.clip_gelu)

    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model)


def enable_calibration(model):
    """Enable calibration of all *_quantizer modules in model."""
    logger.info("Enabling Calibration")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f"{name:80}: {module}")


def finish_calibration(model, args):
    """Disable calibration and load amax for all *_quantizer modules in model."""
    logger.info("Loading calibrated amax")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                if isinstance(module._calibrator, calib.MaxCalibrator):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax("percentile", percentile=args.percentile)
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model)


def fuse_qkv(model, args):
    """Adjust quantization ranges to match an implementation where Q, K and V are
    computed with a single GEMM: force the (Q, K, V) scale factors to their common max.
    """

    def fuse3(qq, qk, qv):
        for mod in [qq, qk, qv]:
            if not hasattr(mod, "_amax"):
                print("          WARNING: NO AMAX BUFFER")
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()

        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
        logger.info(f"          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}")

    for name, mod in model.named_modules():
        if name.endswith(".attention.self"):
            logger.info(f"FUSE_QKV: {name:{name_width}}")
            fuse3(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            if args.quant_per_tensor:
                fuse3(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)


def clip_gelu(model, maxval):
    """Clip activations generated by GELU to maxval when quantized, by clamping the
    amax of the following input quantizer.
    """
    for name, mod in model.named_modules():
        if name.endswith(".output.dense") and not name.endswith("attention.output.dense"):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}")


def expand_amax(model):
    """Expand per-tensor amax to be per channel, assigning the per-tensor amax to each channel."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer") and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device) * amax
            print(f"expanding {name} {amax} -> {mod._weight_quantizer._amax}")


def recalibrate_weights(model):
    """Perform max calibration on the weights and update amax."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer"):
            if not hasattr(mod._weight_quantizer, "_amax"):
                print(f"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER")
                continue

            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
            reduce_axis = set(range(len(mod.weight.size()))) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach()
            logger.info(f"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}")
            mod._weight_quantizer._amax = amax


def print_model_summary(model, name_width=25, line_width=180, ignore=None):
    """Print model quantization configuration."""
    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list):
        ignore = [ignore]

    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, "weight"):
            continue
        name_width = max(name_width, len(name))

    for name, mod in model.named_modules():
        input_q = getattr(mod, "_input_quantizer", None)
        weight_q = getattr(mod, "_weight_quantizer", None)
        if not hasattr(mod, "weight"):
            continue
        if type(mod) in ignore:
            continue
        if [True for s in ignore if type(s) is str and s in name]:
            continue
        act_str = f"Act:{input_q.extra_repr()}"
        wgt_str = f"Wgt:{weight_q.extra_repr()}"
        s = f"{name:{name_width}} {act_str} {wgt_str}"
        if len(s) <= line_width:
            logger.info(s)
        else:
            logger.info(f"{name:{name_width}} {act_str}")
            logger.info(f'{" ":{name_width}} {wgt_str}')


def print_quant_summary(model):
    """Print summary of all quantizer modules in the model."""
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer):
            print(f"{name:80} {mod}")
            count += 1
    print(f"{count} TensorQuantizers found in model")


def set_quantizer(name, mod, quantizer, k, v):
    """Set attribute k of the given quantizer submodule of mod."""
    quantizer_mod = getattr(mod, quantizer, None)
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod, k)
        setattr(quantizer_mod, k, v)
    else:
        logger.warning(f"{name} has no {quantizer}")


def set_quantizers(name, mod, which="both", **kwargs):
    """Set quantizer attributes for mod."""
    s = f"Warning: changing {which} quantizers of {name:{qname_width}}"
    for k, v in kwargs.items():
        s += f" {k}={v}"
        if which in ["input", "both"]:
            set_quantizer(name, mod, "_input_quantizer", k, v)
        if which in ["weight", "both"]:
            set_quantizer(name, mod, "_weight_quantizer", k, v)
    logger.info(s)


def set_quantizer_by_name(model, names, **kwargs):
    """Set quantizer attributes for layers whose name matches a pattern in names."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_input_quantizer") or hasattr(mod, "_weight_quantizer"):
            for n in names:
                if re.search(n, name):
                    set_quantizers(name, mod, **kwargs)
        elif name.endswith("_quantizer"):
            for n in names:
                if re.search(n, name):
                    s = f"Warning: changing {name:{name_width}}"
                    for k, v in kwargs.items():
                        s += f" {k}={v}"
                        setattr(mod, k, v)
                    logger.info(s)
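
# Illustrative wiring sketch (added; assumes the pytorch-quantization package and
# a QDQBERT-style model, both outside this file):
#
#   import argparse
#   parser = argparse.ArgumentParser()
#   add_arguments(parser)
#   args = parser.parse_args(["--aprec", "8", "--wprec", "8", "--quant-per-tensor"])
#   set_default_quantizers(args)  # must run before the quantized model is built
#   model = ...                   # hypothetical quantized model construction
#   configure_model(model, args)  # applies the per-layer quantizer settings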
| 214 | 1 |
# docstyle-ignore
INSTALL_CONTENT = """
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 94 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List

import git
import torch
from torch.utils.data import Dataset

from transformers import BartTokenizer, RagTokenizer, T5Tokenizer


def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(
    input_ids,
    pad_token_id,
    attention_mask=None,
):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])


class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch


logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str):
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
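
# Illustrative demo (added): the QA-style metrics above work on plain strings;
# the expected values in the comments were computed by hand from the definitions.
if __name__ == "__main__":
    print(f1_score("the quick brown fox", "a quick fox"))  # 0.8: 2 shared tokens, precision 2/3, recall 1
    print(exact_match_score("The Cat!", "the cat"))        # True: both normalize to "cat"
    print(calculate_exact_match(["a b"], ["a b"]))         # {'em': 1.0}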
| 94 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}


class LiltConfig(PretrainedConfig):
    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1_024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
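
# Illustrative usage sketch (added; assumes a transformers install):
#
#   from transformers import LiltConfig, LiltModel
#   config = LiltConfig(channel_shrink_ratio=2)        # override one LiLT-specific field
#   model = LiltModel(config)                          # randomly initialized weights
#   assert config.max_2d_position_embeddings == 1024   # default from above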
| 85 |
from __future__ import annotations

import requests


def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Get the top max_stories posts from HackerNews - https://news.ycombinator.com/"""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)


if __name__ == "__main__":
    print(hackernews_top_stories_as_markdown())
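
# Illustrative note (added; hits the live Hacker News API, so output varies):
# hackernews_top_stories_as_markdown(5) returns five markdown bullets shaped like
#
#   * [Story title](https://example.com/story-url)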
| 85 | 1 |
import numpy as np
import torch
from imwatermark import WatermarkEncoder


# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b101100111110110010010000011110111011000110011110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]


class StableDiffusionXLWatermarker:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark("bits", self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images

        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()
        images = [self.encoder.encode(image, "dwtDct") for image in images]
        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)
        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
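
# Illustrative usage sketch (added; assumes the invisible-watermark package that
# provides WatermarkEncoder). Inputs are NCHW tensors in [-1, 1], at least 256 px wide:
#
#   watermarker = StableDiffusionXLWatermarker()
#   images = torch.zeros(1, 3, 512, 512)               # hypothetical batch
#   watermarked = watermarker.apply_watermark(images)  # same shape and value range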
| 368 |
from typing import Optional

from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
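
# Illustrative usage sketch (added; "lines.txt" is a hypothetical file): each line
# of the text file becomes one example under a "text" column.
#
#   reader = TextDatasetReader("lines.txt", keep_in_memory=True)
#   ds = reader.read()
#   print(ds[0]["text"])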
| 51 | 0 |