import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/bart-base': 1024,
'facebook/bart-large': 1024,
'facebook/bart-large-mnli': 1024,
'facebook/bart-large-cnn': 1024,
'facebook/bart-large-xsum': 1024,
'yjernite/bart_eli5': 1024,
}
@lru_cache()
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to printable unicode strings, as used by byte-level BPE.
    Control and whitespace bytes are remapped above 0x100 so that every byte has a visible character.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, given as a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
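
# A quick sketch (not part of the original module) of what these helpers produce:
#
#     byte_encoder = bytes_to_unicode()
#     byte_encoder[ord("a")]  # 'a'  (printable bytes map to themselves)
#     byte_encoder[0]         # 'Ā'  (control bytes are remapped above 0x100)
#     get_pairs(("h", "e", "l", "l", "o"))
#     # {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}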
class BartTokenizer(PreTrainedTokenizer):
    """
    Constructs a BART tokenizer, which uses byte-level Byte-Pair-Encoding (the same scheme as GPT-2 and RoBERTa).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
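
# Usage sketch (not part of the original module); the tokenizer is normally loaded via
# from_pretrained, which downloads the vocab/merges files mapped above:
#
#     from transformers import BartTokenizer
#     tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
#     enc = tokenizer("Hello world!")
#     enc["input_ids"]  # token ids wrapped as <s> ... </s>
#     tokenizer.decode(enc["input_ids"], skip_special_tokens=True)  # "Hello world!"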
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    """
    Combines a LayoutLMv2 image processor and a LayoutXLM tokenizer into a single processor
    for document understanding.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
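
# Usage sketch (not part of the original module); the processor is normally loaded with
# from_pretrained, and with apply_ocr=True the words/boxes come from the OCR step:
#
#     from transformers import LayoutXLMProcessor
#     processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#     encoding = processor(images=image, return_tensors="pt")
#     # keys: input_ids, bbox, attention_mask, image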
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
    # Initialising the CNN
    # (Sequential- Building the model layer by layer)
    classifier = models.Sequential()

    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation='relu')
    )

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation='relu'))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation='relu'))
    classifier.add(layers.Dense(units=1, activation='sigmoid'))

    # Compiling the CNN
    classifier.compile(
        optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']
    )

    # Part 2 - Fitting the CNN to the images

    # Load Trained model weights

    # from keras.models import load_model
    # regressor=load_model('cnn.h5')

    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )

    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)

    training_set = train_datagen.flow_from_directory(
        'dataset/training_set', target_size=(64, 64), batch_size=32, class_mode='binary'
    )

    test_set = test_datagen.flow_from_directory(
        'dataset/test_set', target_size=(64, 64), batch_size=32, class_mode='binary'
    )

    # `Model.fit` accepts data generators directly; `fit_generator` is deprecated in TF 2.x
    classifier.fit(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )

    classifier.save('cnn.h5')

    # Part 3 - Making new predictions

    test_image = tf.keras.preprocessing.image.load_img(
        'dataset/single_prediction/image.png', target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    # The sigmoid output is a probability in [0, 1], so threshold it instead of
    # testing for exact equality with 0 or 1.
    if result[0][0] >= 0.5:
        prediction = 'Abnormality detected'
    else:
        prediction = 'Normal'
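
    # A reload sketch (not in the original script); `cnn.h5` is the file written by
    # `classifier.save` above:
    #
    #     restored = models.load_model('cnn.h5')
    #     restored.predict(test_image)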
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """
    Implements a batched, differentiable, standard pinhole camera.
    """

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        """
        :return: coords of shape (width * height, 2)
        """
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)

        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)

        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """
        Creates a new camera for the resized view, assuming the aspect ratio does not change.
        """
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
            shape=self.shape,  # `shape` is a required field, so it must be forwarded here
        )


def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
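
# Usage sketch (not part of the original module): `create_pan_cameras` builds 20 cameras
# panning around the origin, and `camera_rays` yields one (origin, direction) pair per pixel:
#
#     cameras = create_pan_cameras(64)
#     rays = cameras.camera_rays  # shape [1, 20 * 64 * 64, 2, 3]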
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig):
    """
    Configuration class for UniSpeechSat models.
    """

    model_type = 'unispeech-sat'

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1_500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        num_clusters=504,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
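
# Instantiation sketch (not in the original file); with `transformers` installed the class
# is normally imported as `from transformers import UniSpeechSatConfig`:
#
#     config = UniSpeechSatConfig()   # base-style defaults
#     config.inputs_to_logits_ratio   # 320, the product of the conv strides (5*2*2*2*2*2*2)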
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
logger = logging.getLogger(__name__)


class NERTransformer(BaseTransformer):
    """
    A training module for NER. See BaseTransformer for the core options.
    """

    mode = "token-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        module = import_module("tasks")
        try:
            token_classification_task_clazz = getattr(module, hparams.task_type)
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
                f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
            )
        self.labels = self.token_classification_task.get_labels(hparams.labels)
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams, len(self.labels), self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_num):
        "Compute loss and log."
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids

        outputs = self(**inputs)
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}

    def prepare_data(self):
        "Called to initialize data. Use the call to construct features."
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
                features = torch.load(cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = self.token_classification_task.read_examples_from_file(args.data_dir, mode)
                features = self.token_classification_task.convert_examples_to_features(
                    examples,
                    self.labels,
                    args.max_seq_length,
                    self.tokenizer,
                    cls_token_at_end=bool(self.config.model_type in ["xlnet"]),
                    cls_token=self.tokenizer.cls_token,
                    cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0,
                    sep_token=self.tokenizer.sep_token,
                    sep_token_extra=False,
                    pad_on_left=bool(self.config.model_type in ["xlnet"]),
                    pad_token=self.tokenizer.pad_token_id,
                    pad_token_segment_id=self.tokenizer.pad_token_type_id,
                    pad_token_label_id=self.pad_token_label_id,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: int, batch_size: int, shuffle: bool = False) -> DataLoader:
        "Load datasets. Called after prepare data."
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        else:
            all_token_type_ids = torch.tensor([0 for f in features], dtype=torch.long)
            # HACK(we will not use this anymore soon)
        all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label_ids), batch_size=batch_size
        )

    def validation_step(self, batch, batch_nb):
        """Compute validation"""
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs):
        "Evaluation called for both Val and Test"
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        preds = np.argmax(preds, axis=2)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        label_map = dict(enumerate(self.labels))
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        results = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs):
        # when stable
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs):
        # updating to test_epoch_end instead of deprecated test_end
        ret, predictions, targets = self._eval_end(outputs)
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        # Add NER specific options
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--task_type", default="NER", type=str, help="Task type to fine tune in training (e.g. NER, POS, etc)"
        )
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--labels",
            default="",
            type=str,
            help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = NERTransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = NERTransformer(args)
    trainer = generic_train(model, args)

    if args.do_predict:
        # See https://github.com/huggingface/transformers/issues/3159
        # pl use this default format to create a checkpoint:
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
        # /pytorch_lightning/callbacks/model_checkpoint.py#L322
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, 'checkpoint-epoch=*.ckpt'), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        trainer.test(model)
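
# Example invocation (a sketch; the exact flags come from `add_generic_args` and
# `BaseTransformer.add_model_specific_args` in the accompanying lightning_base module):
#
#     python run_ner.py --data_dir ./data --model_name_or_path bert-base-cased \
#         --output_dir ./output --do_train --do_predict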
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size

        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector

        return latent_states


class PaintByExampleMapper(nn.Module):
    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    hid_size, num_heads, hid_size // num_heads, activation_fn='gelu', attention_bias=True
                )
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)

        return hidden_states
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = 'encoder-decoder'
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
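
# Usage sketch (not part of the original file), composing two existing configs;
# assumes the usual public `transformers` imports:
#
#     from transformers import BertConfig, EncoderDecoderConfig
#     config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig())
#     config.decoder.is_decoder  # True, set by the classmethod above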
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--base_model', action='store_true', help='Whether you want just the base model (no decoder) or not.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias


def get_blip2_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = Blip2Config(vision_config=vision_config, text_config=text_config)

    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """
    Copy/paste/tweak the original BLIP-2 weights to the Transformers design.
    """
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)

    hf_model = Blip2ForConditionalGeneration(config).eval()

    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }

    lavis_name, lavis_model_type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=lavis_name, model_type=lavis_model_type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values, original_pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")

    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
'blip2-opt-2.7b',
'blip2-opt-6.7b',
'blip2-opt-2.7b-coco',
'blip2-opt-6.7b-coco',
'blip2-flan-t5-xl',
'blip2-flan-t5-xl-coco',
'blip2-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='blip2-opt-2.7b',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
    args = parser.parse_args()

    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
import pytest
DATASET_LOADING_SCRIPT_NAME = '__dummy_dataset1__'

DATASET_LOADING_SCRIPT_CODE = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n    def _info(self):\n        features = datasets.Features(\n            {\n                "tokens": datasets.Sequence(datasets.Value("string")),\n                "ner_tags": datasets.Sequence(\n                    datasets.features.ClassLabel(\n                        names=[\n                            "O",\n                            "B-PER",\n                            "I-PER",\n                            "B-ORG",\n                            "I-ORG",\n                            "B-LOC",\n                            "I-LOC",\n                        ]\n                    )\n                ),\n                "langs": datasets.Sequence(datasets.Value("string")),\n                "spans": datasets.Sequence(datasets.Value("string")),\n            }\n        )\n        return datasets.DatasetInfo(features=features)\n\n    def _split_generators(self, dl_manager):\n        dl_path = dl_manager.download(URLS)\n        return [\n            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n        ]\n\n    def _generate_examples(self, filepath):\n        with open(filepath, "r", encoding="utf-8") as f:\n            for i, line in enumerate(f):\n                yield i, json.loads(line)\n'
@pytest.fixture
def snake_case_ ( ):
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def snake_case_ ( ):
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : Optional[Any] = dataset_loading_script_name
UpperCAmelCase_ : Union[str, Any] = tmp_path / '''datasets''' / script_name
script_dir.mkdir(parents=__lowercase )
UpperCAmelCase_ : List[str] = script_dir / F'''{script_name}.py'''
with open(__lowercase , '''w''' ) as f:
f.write(__lowercase )
return str(__lowercase )
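# A minimal sketch of a test consuming the fixture above (hypothetical test;
# assumes `datasets.load_dataset` accepts the returned script path):
#
#   def test_dummy_dataset_script(dataset_loading_script_dir):
#       import datasets
#       ds = datasets.load_dataset(dataset_loading_script_dir, split='train')
#       assert 'tokens' in ds.column_names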
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness(check_program, timeout, task_id, completion_id):
    """
    Evaluates the functional correctness of a completion by running the test
    suite provided in the problem.
    """
    manager = multiprocessing.Manager()
    result = manager.list()
    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()
    if not result:
        result.append('timed out')
    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }
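# Usage sketch (hypothetical program string; run under an
# `if __name__ == "__main__":` guard because of multiprocessing):
#
#   program = 'def add(a, b):\n    return a + b\nassert add(1, 2) == 3'
#   print(check_correctness(program, timeout=3.0, task_id='demo/0', completion_id=0))
#   # -> {'task_id': 'demo/0', 'passed': True, 'result': 'passed', 'completion_id': 0}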
def unsafe_execute(check_program, result, timeout):
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir
        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()
        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append('passed')
        except TimeoutException:
            result.append('timed out')
        except BaseException as e:
            result.append(f'failed: {e}')
        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException('Timed out!')

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)
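# Example for the context manager above (SIGALRM-based, so it only works in
# the main thread on POSIX systems):
#
#   try:
#       with time_limit(2.0):
#           while True:
#               pass
#   except TimeoutException:
#       print('interrupted after 2 seconds')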
@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield
@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname
class TimeoutException(Exception):
    pass


class WriteOnlyStringIO(io.StringIO):
    """StringIO that throws an exception when it's read from."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        """Returns True if the IO object can be read."""
        return False


class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = 'stdin'
@contextlib.contextmanager
def chdir(root):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)
def reliability_guard(maximum_memory_bytes=None):
    """
    Disables various destructive functions and prevents the generated code
    from interfering with the test (e.g. fork bomb, killing other processes,
    removing filesystem files, etc.). WARNING: this is not a security sandbox;
    untrusted code should not be blindly executed outside of one.
    """
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))
    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = "1"
    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore
    __builtins__["help"] = None

    import sys

    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : List[str] = logging.get_logger(__name__)
PATTERNS = [
['attention', 'attn'],
['encoder_attention', 'encoder_attn'],
['q_lin', 'q_proj'],
['k_lin', 'k_proj'],
['v_lin', 'v_proj'],
['out_lin', 'out_proj'],
['norm_embeddings', 'layernorm_embedding'],
['position_embeddings', 'embed_positions'],
['embeddings', 'embed_tokens'],
['ffn.lin', 'fc'],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)
    if k.startswith('encoder'):
        k = k.replace('.attn', '.self_attn')
        k = k.replace('norm1', 'self_attn_layer_norm')
        k = k.replace('norm2', 'final_layer_norm')
    elif k.startswith('decoder'):
        k = k.replace('norm1', 'self_attn_layer_norm')
        k = k.replace('norm2', 'encoder_attn_layer_norm')
        k = k.replace('norm3', 'final_layer_norm')
    return k
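# Example of the mapping performed above (the `attention` -> `attn` and
# `q_lin` -> `q_proj` patterns fire first, then the encoder-specific rule):
#
#   rename_state_dict_key('encoder.layers.0.attention.q_lin.weight')
#   # -> 'encoder.layers.0.self_attn.q_proj.weight'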
def rename_layernorm_keys(sd):
    keys = [
        'model.encoder.layernorm_embedding.weight',
        'model.encoder.layernorm_embedding.bias',
        'model.decoder.layernorm_embedding.weight',
        'model.decoder.layernorm_embedding.bias',
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace('layernorm_embedding', 'layer_norm')
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ['START']
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location='cpu')
    sd = model['model']
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--src_path', type=str, help='like blenderbot-model.bin')
parser.add_argument('--save_dir', default='hf_blenderbot', type=str, help='Where to save converted model.')
parser.add_argument(
'--hf_config_json', default='blenderbot-3b-config.json', type=str, help='Path to config to use'
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
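# Minimal usage sketch (hypothetical local paths; script name assumed):
#
#   python convert_blenderbot_checkpoint.py --src_path ./blenderbot-model.bin \
#       --save_dir ./hf_blenderbot --hf_config_json ./blenderbot-3b-config.json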
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : str = logging.get_logger(__name__)
__UpperCamelCase : Any = {
'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class FalconConfig(PretrainedConfig):
    model_type = 'falcon'
    keys_to_ignore_at_inference = ['past_key_values']

    def __init__(
        self,
        vocab_size=65_024,
        hidden_size=4_544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1E-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop('n_embed', None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
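# Worked example for the properties above: with the default 7B-style settings,
# head_dim = hidden_size // num_attention_heads = 4544 // 71 = 64, and
# `rotary` is True because `alibi` defaults to False.
#
#   config = FalconConfig()
#   assert config.head_dim == 64
#   assert config.rotary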
def reverse_long_words(sentence: str) -> str:
    """
    Reverse every word longer than four characters in a sentence.

    >>> reverse_long_words('Hey wollef sroirraw')
    'Hey fellow warriors'
    """
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('Hey wollef sroirraw'))
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = '<<<<<<< This should probably be modified because it mentions: '
HIGHLIGHT_MESSAGE_POST = '=======\n>>>>>>>\n'
TO_HIGHLIGHT = [
'TextEncoderConfig',
'ByteTextEncoder',
'SubwordTextEncoder',
'encoder_config',
'maybe_build_from_corpus',
'manual_dir',
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(R'tfds\.core', R'datasets'),
(R'tf\.io\.gfile\.GFile', R'open'),
(R'tf\.([\w\d]+)', R'datasets.Value(\'\1\')'),
(R'tfds\.features\.Text\(\)', R'datasets.Value(\'string\')'),
(R'tfds\.features\.Text\(', R'datasets.Value(\'string\'),'),
(R'features\s*=\s*tfds.features.FeaturesDict\(', R'features=datasets.Features('),
(R'tfds\.features\.FeaturesDict\(', R'dict('),
(R'The TensorFlow Datasets Authors', R'The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'),
(R'tfds\.', R'datasets.'),
(R'dl_manager\.manual_dir', R'self.config.data_dir'),
(R'self\.builder_config', R'self.config'),
]
def convert_command_factory(args: Namespace):
    return ConvertCommand(args.tfds_path, args.datasets_directory)


class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            'convert',
            help='Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.',
        )
        train_parser.add_argument(
            '--tfds_path',
            type=str,
            required=True,
            help='Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.',
        )
        train_parser.add_argument(
            '--datasets_directory', type=str, required=True, help='Path to the HuggingFace Datasets folder.'
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger('datasets-cli/converting')
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError('--tfds_path is neither a directory nor a file. Please check path.')
        abs_datasets_path = os.path.abspath(self._datasets_directory)
        self._logger.info(f'Converting datasets from {abs_tfds_path} to {abs_datasets_path}')
        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}
        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]
        for f_name in file_names:
            self._logger.info(f'Looking at file {f_name}')
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)
            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info('Skipping file')
                continue
            with open(input_file, encoding='utf-8') as f:
                lines = f.readlines()
            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = 'import datasets\n'
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ''
                    continue
                elif "from absl import logging" in out_line:
                    out_line = 'from datasets import logging\n'
                elif "getLogger" in out_line:
                    out_line = out_line.replace('getLogger', 'get_logger')
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + '\n')
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)
                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r'from\stensorflow_datasets.*import\s([^\.\r\n]+)', out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(','))
                    out_line = 'from . import ' + match.group(1)
                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f'Error converting {out_line.strip()}')
                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace('.py', '')
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f'Adding directory {output_dir}')
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)
            if needs_manual_update:
                with_manual_update.append(output_file)
            with open(output_file, 'w', encoding='utf-8') as f:
                f.writelines(out_lines)
            self._logger.info(f'Converted in {output_file}')
        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace('.py', '')]
                self._logger.info(f'Moving {dest_folder} to {utils_file}')
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f'Cannot find destination folder for {utils_file}. Please copy manually.')
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f'You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.'
                )
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__UpperCamelCase : str = 'true'
def snake_case_ ( __lowercase , __lowercase=8_2 , __lowercase=1_6 ):
set_seed(4_2 )
UpperCAmelCase_ : Optional[int] = RegressionModel()
UpperCAmelCase_ : Optional[int] = deepcopy(__lowercase )
UpperCAmelCase_ : Union[str, Any] = RegressionDataset(length=__lowercase )
UpperCAmelCase_ : Any = DataLoader(__lowercase , batch_size=__lowercase )
model.to(accelerator.device )
UpperCAmelCase_ , UpperCAmelCase_ : Dict = accelerator.prepare(__lowercase , __lowercase )
return model, ddp_model, dataloader
def snake_case_ ( __lowercase , __lowercase=False ):
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''' )
UpperCAmelCase_ : List[Any] = load_dataset('''glue''' , '''mrpc''' , split='''validation''' )
def tokenize_function(__lowercase ):
UpperCAmelCase_ : int = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__lowercase , max_length=__lowercase )
return outputs
with accelerator.main_process_first():
UpperCAmelCase_ : List[str] = dataset.map(
__lowercase , batched=__lowercase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
UpperCAmelCase_ : Any = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__lowercase ):
if use_longest:
return tokenizer.pad(__lowercase , padding='''longest''' , return_tensors='''pt''' )
return tokenizer.pad(__lowercase , padding='''max_length''' , max_length=1_2_8 , return_tensors='''pt''' )
return DataLoader(__lowercase , shuffle=__lowercase , collate_fn=__lowercase , batch_size=1_6 )
def snake_case_ ( __lowercase , __lowercase ):
UpperCAmelCase_ : Optional[int] = Accelerator(dispatch_batches=__lowercase , split_batches=__lowercase )
UpperCAmelCase_ : int = get_dataloader(__lowercase , not dispatch_batches )
UpperCAmelCase_ : Optional[int] = AutoModelForSequenceClassification.from_pretrained(
'''hf-internal-testing/mrpc-bert-base-cased''' , return_dict=__lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(__lowercase , __lowercase )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : Dict = []
for batch in dataloader:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = batch.values()
with torch.no_grad():
UpperCAmelCase_ : List[Any] = model(__lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Dict = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
UpperCAmelCase_ , UpperCAmelCase_ : Any = [], []
for logit, targ in logits_and_targets:
logits.append(__lowercase )
targs.append(__lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = torch.cat(__lowercase ), torch.cat(__lowercase )
return logits, targs
def snake_case_ ( __lowercase , __lowercase=8_2 , __lowercase=False , __lowercase=False , __lowercase=1_6 ):
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = get_basic_setup(__lowercase , __lowercase , __lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = generate_predictions(__lowercase , __lowercase , __lowercase )
assert (
len(__lowercase ) == num_samples
), F'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(__lowercase )}'''
def snake_case_ ( __lowercase = False , __lowercase = False ):
UpperCAmelCase_ : Optional[Any] = evaluate.load('''glue''' , '''mrpc''' )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = get_mrpc_setup(__lowercase , __lowercase )
# First do baseline
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = setup['''no''']
model.to(__lowercase )
model.eval()
for batch in dataloader:
batch.to(__lowercase )
with torch.inference_mode():
UpperCAmelCase_ : str = model(**__lowercase )
UpperCAmelCase_ : Dict = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=__lowercase , references=batch['''labels'''] )
UpperCAmelCase_ : Optional[int] = metric.compute()
# Then do distributed
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = setup['''ddp''']
model.eval()
for batch in dataloader:
with torch.inference_mode():
UpperCAmelCase_ : Optional[int] = model(**__lowercase )
UpperCAmelCase_ : int = outputs.logits.argmax(dim=-1 )
UpperCAmelCase_ : Optional[int] = batch['''labels''']
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=__lowercase , references=__lowercase )
UpperCAmelCase_ : Dict = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def snake_case_ ( ):
UpperCAmelCase_ : str = Accelerator(split_batches=__lowercase , dispatch_batches=__lowercase )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('''**Testing gather_for_metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' )
test_mrpc(__lowercase , __lowercase )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test torch metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
UpperCAmelCase_ : Optional[Any] = Accelerator(split_batches=__lowercase , dispatch_batches=__lowercase )
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' )
test_torch_metrics(__lowercase , 9_9 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test last batch is not dropped when perfectly divisible**''' )
UpperCAmelCase_ : List[Any] = Accelerator()
test_torch_metrics(__lowercase , 5_1_2 )
accelerator.state._reset_state()
def snake_case_ ( __lowercase ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
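# This file is a distributed test harness; it is intended to be launched with
# the accelerate CLI, e.g. (file name assumed):
#
#   accelerate launch --num_processes 2 test_metrics.py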
import re
def dna(dna: str) -> str:
    """
    Return the complementary strand of a DNA sequence (A<->T, C<->G).

    >>> dna('GCTA')
    'CGAT'
    >>> dna('ATGC')
    'TACG'
    """
    if len(re.findall('[ATCG]', dna)) != len(dna):
        raise ValueError('Invalid Strand')
    return dna.translate(dna.maketrans('ATCG', 'TAGC'))
if __name__ == "__main__":
import doctest
doctest.testmod()
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vivit"] = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
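# Usage sketch: the _LazyModule indirection above defers the heavy torch
# import until an attribute is first accessed (module path assumed):
#
#   from transformers.models.vivit import VivitConfig  # cheap, config only
#   from transformers.models.vivit import VivitModel   # triggers torch import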
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)
def snake_case_ ( __lowercase , __lowercase , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = False , ):
UpperCAmelCase_ : Dict = bnb_quantization_config.load_in_abit
UpperCAmelCase_ : Optional[int] = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
'''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'''
''' make sure you have the latest version of `bitsandbytes` installed.''' )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
'''You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'''
'''make sure you have the latest version of `bitsandbytes` installed.''' )
UpperCAmelCase_ : Optional[Any] = []
# custom device map
if isinstance(__lowercase , __lowercase ) and len(device_map.keys() ) > 1:
UpperCAmelCase_ : Any = [key for key, value in device_map.items() if value in ['''disk''', '''cpu''']]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
UpperCAmelCase_ : List[str] = get_keys_to_not_convert(__lowercase )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(__lowercase )
UpperCAmelCase_ : Optional[int] = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
UpperCAmelCase_ : Dict = []
UpperCAmelCase_ : int = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(__lowercase )
# compatibility with peft
UpperCAmelCase_ : List[Any] = load_in_abit
UpperCAmelCase_ : Optional[int] = load_in_abit
UpperCAmelCase_ : Optional[Any] = get_parameter_device(__lowercase )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'''It is not recommended to quantize a loaded model. '''
'''The model should be instantiated under the `init_empty_weights` context manager.''' )
UpperCAmelCase_ : str = replace_with_bnb_layers(__lowercase , __lowercase , modules_to_not_convert=__lowercase )
# convert param to the right dtype
UpperCAmelCase_ : int = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
UpperCAmelCase_ : str = name.replace('''.weight''' , '''''' ).replace('''.bias''' , '''''' )
UpperCAmelCase_ : Dict = getattr(__lowercase , __lowercase , __lowercase )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(__lowercase ):
param.to(__lowercase )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
logger.info(
F'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
'''We move the model to cuda.''' )
return model
elif weights_location is None:
raise RuntimeError(
F'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
else:
with init_empty_weights():
UpperCAmelCase_ : int = replace_with_bnb_layers(
__lowercase , __lowercase , modules_to_not_convert=__lowercase )
UpperCAmelCase_ : Optional[int] = get_quantized_model_device_map(
__lowercase , __lowercase , __lowercase , max_memory=__lowercase , no_split_module_classes=__lowercase , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
UpperCAmelCase_ : Tuple = True
UpperCAmelCase_ : Optional[Any] = any(x in list(device_map.values() ) for x in ['''cpu''', '''disk'''] )
load_checkpoint_in_model(
__lowercase , __lowercase , __lowercase , dtype=bnb_quantization_config.torch_dtype , offload_folder=__lowercase , offload_state_dict=__lowercase , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(__lowercase , device_map=__lowercase , offload_dir=__lowercase )
def snake_case_ ( __lowercase , __lowercase , __lowercase=None , __lowercase=None , __lowercase=None ):
if device_map is None:
if torch.cuda.is_available():
UpperCAmelCase_ : List[Any] = {'''''': torch.cuda.current_device()}
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
            logger.info('''The device_map was not initialized. ''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''' )
if isinstance(__lowercase , __lowercase ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '''
'''\'sequential\'.''' )
UpperCAmelCase_ : List[str] = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
UpperCAmelCase_ : Dict = {}
UpperCAmelCase_ : Tuple = special_dtypes
UpperCAmelCase_ : List[str] = no_split_module_classes
UpperCAmelCase_ : str = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
UpperCAmelCase_ : Union[str, Any] = get_balanced_memory(
__lowercase , low_zero=(device_map == '''balanced_low_0''') , max_memory=__lowercase , **__lowercase , )
UpperCAmelCase_ : List[Any] = max_memory
UpperCAmelCase_ : List[Any] = infer_auto_device_map(__lowercase , **__lowercase )
if isinstance(__lowercase , __lowercase ):
# check if don't have any quantized module on the cpu
UpperCAmelCase_ : List[Any] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
UpperCAmelCase_ : int = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
'''
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
''' )
else:
logger.info(
                    '''Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit''' )
del device_map_without_some_modules
return device_map
def snake_case_ ( __lowercase , __lowercase , __lowercase=None , __lowercase=None ):
if modules_to_not_convert is None:
UpperCAmelCase_ : Tuple = []
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = _replace_with_bnb_layers(
__lowercase , __lowercase , __lowercase , __lowercase )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def snake_case_ ( __lowercase , __lowercase , __lowercase=None , __lowercase=None , ):
UpperCAmelCase_ : str = False
for name, module in model.named_children():
if current_key_name is None:
UpperCAmelCase_ : Union[str, Any] = []
current_key_name.append(__lowercase )
if isinstance(__lowercase , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
UpperCAmelCase_ : Dict = '''.'''.join(__lowercase )
UpperCAmelCase_ : Tuple = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
UpperCAmelCase_ : Tuple = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
UpperCAmelCase_ : List[str] = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=__lowercase , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
UpperCAmelCase_ : Tuple = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError('''load_in_8bit and load_in_4bit can\'t be both False''' )
UpperCAmelCase_ : Optional[Any] = module.weight.data
if module.bias is not None:
UpperCAmelCase_ : List[str] = module.bias.data
bnb_module.requires_grad_(__lowercase )
setattr(__lowercase , __lowercase , __lowercase )
UpperCAmelCase_ : Optional[Any] = True
if len(list(module.children() ) ) > 0:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = _replace_with_bnb_layers(
__lowercase , __lowercase , __lowercase , __lowercase )
UpperCAmelCase_ : Tuple = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def snake_case_ ( __lowercase ):
# Create a copy of the model
with init_empty_weights():
UpperCAmelCase_ : List[str] = deepcopy(__lowercase ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
UpperCAmelCase_ : int = find_tied_parameters(__lowercase )
# For compatibility with Accelerate < 0.18
if isinstance(__lowercase , __lowercase ):
UpperCAmelCase_ : int = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
UpperCAmelCase_ : Optional[int] = sum(__lowercase , [] )
UpperCAmelCase_ : Optional[Any] = len(__lowercase ) > 0
# Check if it is a base model
UpperCAmelCase_ : Union[str, Any] = False
if hasattr(__lowercase , '''base_model_prefix''' ):
UpperCAmelCase_ : Optional[int] = not hasattr(__lowercase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
UpperCAmelCase_ : Optional[Any] = list(model.named_children() )
UpperCAmelCase_ : int = [list_modules[-1][0]]
# add last module together with tied weights
UpperCAmelCase_ : int = set(__lowercase ) - set(__lowercase )
UpperCAmelCase_ : List[str] = list(set(__lowercase ) ) + list(__lowercase )
# remove ".weight" from the keys
UpperCAmelCase_ : Optional[Any] = ['''.weight''', '''.bias''']
UpperCAmelCase_ : Union[str, Any] = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
UpperCAmelCase_ : List[str] = name.replace(__lowercase , '''''' )
filtered_module_names.append(__lowercase )
return filtered_module_names
def snake_case_ ( __lowercase ):
for m in model.modules():
if isinstance(__lowercase , bnb.nn.Linearabit ):
return True
return False
def snake_case_ ( __lowercase ):
return next(parameter.parameters() ).device
def snake_case_ ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ):
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(__lowercase , __lowercase , 0 , dtype=__lowercase , value=__lowercase )
UpperCAmelCase_ : Optional[int] = param_name
UpperCAmelCase_ : Optional[int] = model
if "." in tensor_name:
UpperCAmelCase_ : int = tensor_name.split('''.''' )
for split in splits[:-1]:
UpperCAmelCase_ : Any = getattr(__lowercase , __lowercase )
if new_module is None:
raise ValueError(F'''{module} has no attribute {split}.''' )
UpperCAmelCase_ : Any = new_module
UpperCAmelCase_ : int = splits[-1]
# offload weights
UpperCAmelCase_ : Any = False
offload_weight(module._parameters[tensor_name] , __lowercase , __lowercase , index=__lowercase )
if hasattr(module._parameters[tensor_name] , '''SCB''' ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''' ) , __lowercase , index=__lowercase , )
else:
offload_weight(__lowercase , __lowercase , __lowercase , index=__lowercase )
offload_weight(__lowercase , param_name.replace('''weight''' , '''SCB''' ) , __lowercase , index=__lowercase )
set_module_tensor_to_device(__lowercase , __lowercase , '''meta''' , dtype=__lowercase , value=torch.empty(*param.size() ) )
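# High-level usage sketch for the quantization entry point defined above
# (mirrors accelerate's public `load_and_quantize_model` API; treat the exact
# names and paths as assumptions):
#
#   from accelerate import init_empty_weights
#   from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
#
#   bnb_config = BnbQuantizationConfig(load_in_8bit=True)
#   with init_empty_weights():
#       empty_model = MyModel()  # hypothetical module
#   model = load_and_quantize_model(
#       empty_model, bnb_config, weights_location='./checkpoint_dir', device_map='auto'
#   )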
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHA1Hash:
    """Class to contain the entire pipeline for the SHA-1 hashing algorithm."""

    def __init__(self, data):
        self.data = data
        self.h = [0x67_45_23_01, 0xEF_CD_AB_89, 0x98_BA_DC_FE, 0x10_32_54_76, 0xC3_D2_E1_F0]

    @staticmethod
    def rotate(n, b):
        """
        Left-rotates the 32-bit integer n by b bits.

        >>> SHA1Hash(b'').rotate(12, 2)
        48
        """
        return ((n << b) | (n >> (32 - b))) & 0xFF_FF_FF_FF

    def padding(self):
        padding = b'\x80' + b'\x00' * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack('>Q', 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        w = list(struct.unpack('>16L', block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A_82_79_99
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6E_D9_EB_A1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F_1B_BC_DC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA_62_C1_D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFF_FF_FF_FF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFF_FF_FF_FF,
                self.h[1] + b & 0xFF_FF_FF_FF,
                self.h[2] + c & 0xFF_FF_FF_FF,
                self.h[3] + d & 0xFF_FF_FF_FF,
                self.h[4] + e & 0xFF_FF_FF_FF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash():
    msg = b'Test String'
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    parser = argparse.ArgumentParser(description='Process some strings or files')
    parser.add_argument(
        '--string', dest='input_string', default='Hello World!! Welcome to Cryptography', help='Hash the string', )
    parser.add_argument('--file', dest='input_file', help='Hash contents of a file')
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, 'rb') as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, 'utf-8')
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest

    doctest.testmod()
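# Quick sanity check of the implementation above against hashlib:
#
#   >>> import hashlib
#   >>> SHA1Hash(b'abc').final_hash() == hashlib.sha1(b'abc').hexdigest()
#   True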
from torch import nn
def get_activation(act_fn):
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(F'''Unsupported activation function: {act_fn}''' )
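# Usage sketch for the factory above (assumes torch is imported by the caller):
#
#   act = get_activation('silu')
#   y = act(torch.randn(2, 4))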
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : int = logging.get_logger(__name__)
__UpperCamelCase : Union[str, Any] = {
'facebook/timesformer': 'https://huggingface.co/facebook/timesformer/resolve/main/config.json',
}
class TimesformerConfig(PretrainedConfig):
    model_type = 'timesformer'

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act='gelu',
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1E-6,
        qkv_bias=True,
        attention_type='divided_space_time',
        drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
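# Sketch: with the defaults above, each frame yields (224 / 16) ** 2 = 196
# patches, and "divided_space_time" attention runs a temporal pass over the
# 8 frames followed by a spatial pass over those patches. For example:
#
#   config = TimesformerConfig(num_frames=16, attention_type='divided_space_time')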
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool('text-classification')
        self.tool.setup()
        self.remote_tool = load_tool('text-classification', remote=True)

    def test_exact_match_arg(self):
        result = self.tool('That\'s quite cool', ['positive', 'negative'])
        self.assertEqual(result, 'positive')

    def test_exact_match_arg_remote(self):
        result = self.remote_tool('That\'s quite cool', ['positive', 'negative'])
        self.assertEqual(result, 'positive')

    def test_exact_match_kwarg(self):
        result = self.tool(text='That\'s quite cool', labels=['positive', 'negative'])
        self.assertEqual(result, 'positive')

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text='That\'s quite cool', labels=['positive', 'negative'])
        self.assertEqual(result, 'positive')
import math
import qiskit
def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1):
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError('inputs must be integers.')
    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError('inputs must be positive.')
    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError('inputs must be exact integers.')
    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError('inputs must be less or equal to 2.')
    # build registers
    qr = qiskit.QuantumRegister(4, 'qr')
    cr = qiskit.ClassicalRegister(2, 'cr')
    # list the entries
    entry = [input_1, input_2, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr, cr)
    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits
    backend = qiskit.Aer.get_backend('aer_simulator')
    job = qiskit.execute(quantum_circuit, backend, shots=1_000)
    return job.result().get_counts(quantum_circuit)
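# Example: quantum_full_adder(1, 1, 1) computes 1 + 1 + 1 = 3 = 0b11, so the
# simulator's counts concentrate on the classical bitstring '11' (carry-out
# from qubit 3 in the high bit, sum from qubit 2 in the low bit).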
if __name__ == "__main__":
print(F'Total sum count for state is: {quantum_full_adder(1, 1, 1)}')
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def snake_case_ ( __lowercase , __lowercase ):
UpperCAmelCase_ : List[str] = XCLIPTextConfig()
# derive patch size from model name
UpperCAmelCase_ : Union[str, Any] = model_name.find('''patch''' )
UpperCAmelCase_ : Optional[Any] = int(model_name[start_idx + len('''patch''' ) : start_idx + len('''patch''' ) + 2] )
UpperCAmelCase_ : Optional[Any] = XCLIPVisionConfig(patch_size=__lowercase , num_frames=__lowercase )
if "large" in model_name:
UpperCAmelCase_ : Tuple = 7_6_8
UpperCAmelCase_ : Union[str, Any] = 3_0_7_2
UpperCAmelCase_ : Optional[int] = 1_2
UpperCAmelCase_ : Dict = 1_0_2_4
UpperCAmelCase_ : List[str] = 4_0_9_6
UpperCAmelCase_ : int = 1_6
UpperCAmelCase_ : int = 2_4
UpperCAmelCase_ : Tuple = 7_6_8
UpperCAmelCase_ : str = 3_0_7_2
if model_name == "xclip-large-patch14-16-frames":
UpperCAmelCase_ : Tuple = 3_3_6
UpperCAmelCase_ : List[str] = XCLIPConfig.from_text_vision_configs(__lowercase , __lowercase )
if "large" in model_name:
UpperCAmelCase_ : Dict = 7_6_8
return config
def snake_case_ ( __lowercase ):
# text encoder
if name == "token_embedding.weight":
UpperCAmelCase_ : List[Any] = name.replace('''token_embedding.weight''' , '''text_model.embeddings.token_embedding.weight''' )
if name == "positional_embedding":
UpperCAmelCase_ : Union[str, Any] = name.replace('''positional_embedding''' , '''text_model.embeddings.position_embedding.weight''' )
if "ln_1" in name:
UpperCAmelCase_ : Optional[int] = name.replace('''ln_1''' , '''layer_norm1''' )
if "ln_2" in name:
UpperCAmelCase_ : Union[str, Any] = name.replace('''ln_2''' , '''layer_norm2''' )
if "c_fc" in name:
UpperCAmelCase_ : Union[str, Any] = name.replace('''c_fc''' , '''fc1''' )
if "c_proj" in name:
UpperCAmelCase_ : Dict = name.replace('''c_proj''' , '''fc2''' )
if name.startswith('''transformer.resblocks''' ):
UpperCAmelCase_ : Tuple = name.replace('''transformer.resblocks''' , '''text_model.encoder.layers''' )
if "attn.out_proj" in name and "message" not in name:
UpperCAmelCase_ : Optional[int] = name.replace('''attn.out_proj''' , '''self_attn.out_proj''' )
if "ln_final" in name:
UpperCAmelCase_ : Tuple = name.replace('''ln_final''' , '''text_model.final_layer_norm''' )
# visual encoder
if name == "visual.class_embedding":
UpperCAmelCase_ : Optional[Any] = name.replace('''visual.class_embedding''' , '''vision_model.embeddings.class_embedding''' )
if name == "visual.positional_embedding":
UpperCAmelCase_ : Optional[Any] = name.replace('''visual.positional_embedding''' , '''vision_model.embeddings.position_embedding.weight''' )
if name.startswith('''visual.transformer.resblocks''' ):
UpperCAmelCase_ : Union[str, Any] = name.replace('''visual.transformer.resblocks''' , '''vision_model.encoder.layers''' )
if "visual.conv1" in name:
UpperCAmelCase_ : Any = name.replace('''visual.conv1''' , '''vision_model.embeddings.patch_embedding''' )
if "visual.ln_pre" in name:
UpperCAmelCase_ : List[Any] = name.replace('''visual.ln_pre''' , '''vision_model.pre_layernorm''' )
if "visual.ln_post" in name:
UpperCAmelCase_ : str = name.replace('''visual.ln_post''' , '''vision_model.post_layernorm''' )
if "visual.proj" in name:
UpperCAmelCase_ : List[Any] = name.replace('''visual.proj''' , '''visual_projection.weight''' )
if "text_projection" in name:
UpperCAmelCase_ : List[str] = name.replace('''text_projection''' , '''text_projection.weight''' )
# things on top
if "prompts_visual_proj" in name:
UpperCAmelCase_ : Dict = name.replace('''prompts_visual_proj''' , '''prompts_visual_projection''' )
if "prompts_visual_ln" in name:
UpperCAmelCase_ : Tuple = name.replace('''prompts_visual_ln''' , '''prompts_visual_layernorm''' )
# mit
if name == "mit.positional_embedding":
UpperCAmelCase_ : int = name.replace('''positional''' , '''position''' )
if name.startswith('''mit.resblocks''' ):
UpperCAmelCase_ : List[Any] = name.replace('''mit.resblocks''' , '''mit.encoder.layers''' )
# prompts generator
if name.startswith('''prompts_generator.norm''' ):
UpperCAmelCase_ : int = name.replace('''prompts_generator.norm''' , '''prompts_generator.layernorm''' )
return name
def snake_case_ ( __lowercase , __lowercase ):
for key in orig_state_dict.copy().keys():
UpperCAmelCase_ : Any = orig_state_dict.pop(__lowercase )
if "attn.in_proj" in key:
UpperCAmelCase_ : List[Any] = key.split('''.''' )
if key.startswith('''visual''' ):
UpperCAmelCase_ : Any = key_split[3]
UpperCAmelCase_ : int = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
UpperCAmelCase_ : List[Any] = val[
:dim, :
]
UpperCAmelCase_ : Any = val[
dim : dim * 2, :
]
UpperCAmelCase_ : Tuple = val[
-dim:, :
]
else:
UpperCAmelCase_ : Dict = val[
:dim
]
UpperCAmelCase_ : Union[str, Any] = val[
dim : dim * 2
]
UpperCAmelCase_ : List[Any] = val[
-dim:
]
else:
if "weight" in key:
UpperCAmelCase_ : Union[str, Any] = val[
:dim, :
]
UpperCAmelCase_ : Tuple = val[
dim : dim * 2, :
]
UpperCAmelCase_ : Tuple = val[
-dim:, :
]
else:
UpperCAmelCase_ : int = val[:dim]
UpperCAmelCase_ : int = val[
dim : dim * 2
]
UpperCAmelCase_ : List[str] = val[-dim:]
elif key.startswith('''mit''' ):
UpperCAmelCase_ : List[str] = key_split[2]
UpperCAmelCase_ : int = config.vision_config.mit_hidden_size
if "weight" in key:
UpperCAmelCase_ : Optional[int] = val[:dim, :]
UpperCAmelCase_ : Any = val[dim : dim * 2, :]
UpperCAmelCase_ : Any = val[-dim:, :]
else:
UpperCAmelCase_ : Any = val[:dim]
UpperCAmelCase_ : str = val[dim : dim * 2]
UpperCAmelCase_ : Any = val[-dim:]
else:
UpperCAmelCase_ : str = key_split[2]
UpperCAmelCase_ : str = config.text_config.hidden_size
if "weight" in key:
UpperCAmelCase_ : Dict = val[:dim, :]
UpperCAmelCase_ : Optional[Any] = val[
dim : dim * 2, :
]
UpperCAmelCase_ : List[str] = val[-dim:, :]
else:
UpperCAmelCase_ : Any = val[:dim]
UpperCAmelCase_ : Optional[int] = val[
dim : dim * 2
]
UpperCAmelCase_ : int = val[-dim:]
else:
UpperCAmelCase_ : Optional[Any] = rename_key(__lowercase )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
UpperCAmelCase_ : List[str] = val.T
UpperCAmelCase_ : int = val
return orig_state_dict
def snake_case_ ( __lowercase ):
if num_frames == 8:
UpperCAmelCase_ : Optional[Any] = '''eating_spaghetti_8_frames.npy'''
elif num_frames == 1_6:
UpperCAmelCase_ : int = '''eating_spaghetti.npy'''
elif num_frames == 3_2:
UpperCAmelCase_ : Union[str, Any] = '''eating_spaghetti_32_frames.npy'''
UpperCAmelCase_ : str = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''' , filename=__lowercase , repo_type='''dataset''' , )
UpperCAmelCase_ : Optional[Any] = np.load(__lowercase )
return list(__lowercase )
def snake_case_ ( __lowercase , __lowercase=None , __lowercase=False ):
UpperCAmelCase_ : Tuple = {
# fully supervised kinetics-400 checkpoints
'''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''',
'''xclip-base-patch32-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'''
),
'''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''',
'''xclip-base-patch16-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'''
),
'''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''',
'''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''',
# fully supervised kinetics-600 checkpoints
'''xclip-base-patch16-kinetics-600''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'''
),
'''xclip-base-patch16-kinetics-600-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'''
),
'''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''',
# few shot
'''xclip-base-patch16-hmdb-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'''
),
'''xclip-base-patch16-hmdb-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'''
),
'''xclip-base-patch16-hmdb-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'''
),
'''xclip-base-patch16-hmdb-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'''
),
'''xclip-base-patch16-ucf-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'''
),
'''xclip-base-patch16-ucf-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'''
),
'''xclip-base-patch16-ucf-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'''
),
'''xclip-base-patch16-ucf-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'''
),
# zero shot
'''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''',
}
UpperCAmelCase_ : Dict = model_to_url[model_name]
UpperCAmelCase_ : Union[str, Any] = 8
if "16-frames" in model_name:
UpperCAmelCase_ : str = 1_6
elif "shot" in model_name:
UpperCAmelCase_ : Tuple = 3_2
UpperCAmelCase_ : List[str] = get_xclip_config(__lowercase , __lowercase )
UpperCAmelCase_ : Any = XCLIPModel(__lowercase )
model.eval()
if "drive" in checkpoint_url:
UpperCAmelCase_ : Dict = '''pytorch_model.bin'''
gdown.cached_download(__lowercase , __lowercase , quiet=__lowercase )
UpperCAmelCase_ : int = torch.load(__lowercase , map_location='''cpu''' )['''model''']
else:
UpperCAmelCase_ : Tuple = torch.hub.load_state_dict_from_url(__lowercase )['''model''']
UpperCAmelCase_ : List[Any] = convert_state_dict(__lowercase , __lowercase )
UpperCAmelCase_ : Union[str, Any] = XCLIPModel(__lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = model.load_state_dict(__lowercase , strict=__lowercase )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
UpperCAmelCase_ : Tuple = 336 if model_name == '''xclip-large-patch14-16-frames''' else 224
UpperCAmelCase_ : Tuple = VideoMAEImageProcessor(size=__lowercase )
UpperCAmelCase_ : Optional[Any] = CLIPTokenizer.from_pretrained('''openai/clip-vit-base-patch32''' )
UpperCAmelCase_ : int = CLIPTokenizerFast.from_pretrained('''openai/clip-vit-base-patch32''' )
UpperCAmelCase_ : Union[str, Any] = XCLIPProcessor(image_processor=__lowercase , tokenizer=__lowercase )
UpperCAmelCase_ : List[Any] = prepare_video(__lowercase )
UpperCAmelCase_ : str = processor(
text=['''playing sports''', '''eating spaghetti''', '''go shopping'''] , videos=__lowercase , return_tensors='''pt''' , padding=__lowercase )
print('''Shape of pixel values:''' , inputs.pixel_values.shape )
with torch.no_grad():
UpperCAmelCase_ : Optional[int] = model(**__lowercase )
# Verify outputs
UpperCAmelCase_ : Dict = outputs.logits_per_video
UpperCAmelCase_ : Any = logits_per_video.softmax(dim=1 )
print('''Probs:''' , __lowercase )
# kinetics-400
if model_name == "xclip-base-patch32":
UpperCAmelCase_ : Optional[int] = torch.tensor([[0.0_0_1_9, 0.9_9_5_1, 0.0_0_3_0]] )
elif model_name == "xclip-base-patch32-16-frames":
UpperCAmelCase_ : int = torch.tensor([[7.09_99e-04, 9.98_83e-01, 4.55_80e-04]] )
elif model_name == "xclip-base-patch16":
UpperCAmelCase_ : List[Any] = torch.tensor([[0.0_0_8_3, 0.9_6_8_1, 0.0_2_3_6]] )
elif model_name == "xclip-base-patch16-16-frames":
UpperCAmelCase_ : str = torch.tensor([[7.69_37e-04, 9.97_28e-01, 1.94_73e-03]] )
elif model_name == "xclip-large-patch14":
UpperCAmelCase_ : str = torch.tensor([[0.0_0_6_2, 0.9_8_6_4, 0.0_0_7_5]] )
elif model_name == "xclip-large-patch14-16-frames":
UpperCAmelCase_ : List[str] = torch.tensor([[3.38_77e-04, 9.99_37e-01, 2.88_88e-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
UpperCAmelCase_ : Tuple = torch.tensor([[0.0_5_5_5, 0.8_9_1_4, 0.0_5_3_1]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
UpperCAmelCase_ : List[str] = torch.tensor([[3.85_54e-04, 9.99_29e-01, 3.27_54e-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
UpperCAmelCase_ : Any = torch.tensor([[0.0_0_3_6, 0.9_9_2_0, 0.0_0_4_5]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
UpperCAmelCase_ : Any = torch.tensor([[7.18_90e-06, 9.99_94e-01, 5.65_59e-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
UpperCAmelCase_ : Optional[Any] = torch.tensor([[1.03_20e-05, 9.99_93e-01, 6.24_35e-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
UpperCAmelCase_ : Union[str, Any] = torch.tensor([[4.13_77e-06, 9.99_90e-01, 9.83_86e-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
UpperCAmelCase_ : int = torch.tensor([[4.13_47e-05, 9.99_62e-01, 3.34_11e-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
UpperCAmelCase_ : List[str] = torch.tensor([[8.58_57e-05, 9.99_28e-01, 6.32_91e-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
UpperCAmelCase_ : List[Any] = torch.tensor([[8.58_57e-05, 9.99_28e-01, 6.32_91e-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
UpperCAmelCase_ : int = torch.tensor([[0.0_0_2_7, 0.9_9_0_4, 0.0_0_7_0]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
UpperCAmelCase_ : List[str] = torch.tensor([[9.82_19e-04, 9.95_93e-01, 3.08_63e-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
UpperCAmelCase_ : int = torch.tensor([[3.50_82e-04, 9.97_85e-01, 1.79_66e-03]] )
else:
raise ValueError(F'''Model name {model_name} not supported''' )
assert torch.allclose(__lowercase , __lowercase , atol=1e-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__lowercase )
if push_to_hub:
print('''Pushing model, processor and slow tokenizer files to the hub...''' )
model.push_to_hub(__lowercase , organization='''nielsr''' )
processor.push_to_hub(__lowercase , organization='''nielsr''' )
slow_tokenizer.push_to_hub(__lowercase , organization='''nielsr''' )
if __name__ == "__main__":
__UpperCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='xclip-base-patch32',
type=str,
help='Name of the model.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__UpperCamelCase : Tuple = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
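# Example invocation (hypothetical script name and output path):
#   python convert_x_clip_original_pytorch_to_hf.py --model_name xclip-base-patch32 \
#       --pytorch_dump_folder_path ./xclip-base-patch32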
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Any = logging.get_logger(__name__)
__UpperCamelCase : Dict = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : List[str] = 'gpt_bigcode'
A_ : Optional[Any] = ['past_key_values']
A_ : Optional[int] = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : int , __snake_case : Dict=50_257 , __snake_case : List[str]=1_024 , __snake_case : Dict=768 , __snake_case : Optional[int]=12 , __snake_case : str=12 , __snake_case : List[str]=None , __snake_case : List[str]="gelu_pytorch_tanh" , __snake_case : Optional[Any]=0.1 , __snake_case : str=0.1 , __snake_case : Optional[Any]=0.1 , __snake_case : List[str]=1E-5 , __snake_case : Dict=0.02 , __snake_case : Optional[int]=True , __snake_case : Tuple=True , __snake_case : Optional[Any]=50_256 , __snake_case : Optional[Any]=50_256 , __snake_case : Optional[Any]=True , __snake_case : Optional[Any]=True , __snake_case : List[Any]=True , **__snake_case : List[str] , ):
'''simple docstring'''
UpperCAmelCase_ : Any = vocab_size
UpperCAmelCase_ : int = n_positions
UpperCAmelCase_ : Any = n_embd
UpperCAmelCase_ : Union[str, Any] = n_layer
UpperCAmelCase_ : List[str] = n_head
UpperCAmelCase_ : List[Any] = n_inner
UpperCAmelCase_ : Optional[int] = activation_function
UpperCAmelCase_ : str = resid_pdrop
UpperCAmelCase_ : Optional[Any] = embd_pdrop
UpperCAmelCase_ : str = attn_pdrop
UpperCAmelCase_ : Any = layer_norm_epsilon
UpperCAmelCase_ : List[str] = initializer_range
UpperCAmelCase_ : Optional[int] = scale_attn_weights
UpperCAmelCase_ : Union[str, Any] = use_cache
UpperCAmelCase_ : Dict = attention_softmax_in_fp32
UpperCAmelCase_ : Union[str, Any] = scale_attention_softmax_in_fp32
UpperCAmelCase_ : Optional[int] = multi_query
UpperCAmelCase_ : Optional[Any] = bos_token_id
UpperCAmelCase_ : Tuple = eos_token_id
super().__init__(bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
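# The ``attribute_map`` above exposes the GPT-2-style ``n_embd`` / ``n_positions`` /
# ``n_head`` / ``n_layer`` names under the standard ``hidden_size`` /
# ``max_position_embeddings`` / ``num_attention_heads`` / ``num_hidden_layers`` aliases
# used across the library.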
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class lowerCAmelCase__:
'''simple docstring'''
A_ : torch.Tensor # [batch_size x 3]
A_ : torch.Tensor # [batch_size x 3]
A_ : torch.Tensor # [batch_size x 3]
A_ : torch.Tensor # [batch_size x 3]
A_ : int
A_ : int
A_ : float
A_ : float
A_ : Tuple[int]
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.float32 ) )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.float32 ) )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = torch.arange(self.height * self.width )
UpperCAmelCase_ : Any = torch.stack(
[
pixel_indices % self.width,
torch.div(__snake_case , self.width , rounding_mode='''trunc''' ),
] , axis=1 , )
return coords
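# The ``camera_rays`` property (below) broadcasts these per-pixel coordinates across the
# batch and turns them into (origin, direction) pairs, hence the trailing ``2 x 3`` in
# the ray tensor shapes.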
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ , *UpperCAmelCase_ : List[Any] = self.shape
UpperCAmelCase_ : Optional[Any] = int(np.prod(__snake_case ) )
UpperCAmelCase_ : str = self.get_image_coords()
UpperCAmelCase_ : List[str] = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
UpperCAmelCase_ : Tuple = self.get_camera_rays(__snake_case )
UpperCAmelCase_ : Union[str, Any] = rays.view(__snake_case , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def _lowerCamelCase ( self : Dict , __snake_case : torch.Tensor ):
'''simple docstring'''
UpperCAmelCase_ , *UpperCAmelCase_ , UpperCAmelCase_ : int = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
UpperCAmelCase_ : str = coords.view(__snake_case , -1 , 2 )
UpperCAmelCase_ : List[Any] = self.resolution()
UpperCAmelCase_ : Optional[Any] = self.fov()
UpperCAmelCase_ : int = (flat.float() / (res - 1)) * 2 - 1
UpperCAmelCase_ : Optional[Any] = fracs * torch.tan(fov / 2 )
UpperCAmelCase_ : Optional[Any] = fracs.view(__snake_case , -1 , 2 )
UpperCAmelCase_ : List[Any] = (
self.z.view(__snake_case , 1 , 3 )
+ self.x.view(__snake_case , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(__snake_case , 1 , 3 ) * fracs[:, :, 1:]
)
UpperCAmelCase_ : Union[str, Any] = directions / directions.norm(dim=-1 , keepdim=__snake_case )
UpperCAmelCase_ : Optional[int] = torch.stack(
[
torch.broadcast_to(self.origin.view(__snake_case , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(__snake_case , *__snake_case , 2 , 3 )
def _lowerCamelCase ( self : Any , __snake_case : int , __snake_case : int ):
'''simple docstring'''
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=__snake_case , height=__snake_case , x_fov=self.x_fov , y_fov=self.y_fov , )
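# The helper below builds 20 camera poses on a circle around the origin, each looking
# toward the centre and tilted slightly downward: z is the view direction, x and y span
# the image plane, and y = np.cross(z, x) keeps the frame orthogonal.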
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : List[Any] = []
UpperCAmelCase_ : int = []
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : Tuple = []
for theta in np.linspace(0 , 2 * np.pi , num=20 ):
UpperCAmelCase_ : Tuple = np.array([np.sin(__lowercase ), np.cos(__lowercase ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
UpperCAmelCase_ : str = -z * 4
UpperCAmelCase_ : List[Any] = np.array([np.cos(__lowercase ), -np.sin(__lowercase ), 0.0] )
UpperCAmelCase_ : Tuple = np.cross(__lowercase , __lowercase )
origins.append(__lowercase )
xs.append(__lowercase )
ys.append(__lowercase )
zs.append(__lowercase )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(__lowercase , axis=0 ) ).float() , x=torch.from_numpy(np.stack(__lowercase , axis=0 ) ).float() , y=torch.from_numpy(np.stack(__lowercase , axis=0 ) ).float() , z=torch.from_numpy(np.stack(__lowercase , axis=0 ) ).float() , width=__lowercase , height=__lowercase , x_fov=0.7 , y_fov=0.7 , shape=(1, len(__lowercase )) , )
import fire
from utils import calculate_rouge, save_json
def snake_case_ ( __lowercase , __lowercase , __lowercase=None , **__lowercase ):
UpperCAmelCase_ : Tuple = [x.strip() for x in open(__lowercase ).readlines()]
UpperCAmelCase_ : Dict = [x.strip() for x in open(__lowercase ).readlines()][: len(__lowercase )]
UpperCAmelCase_ : int = calculate_rouge(__lowercase , __lowercase , **__lowercase )
if save_path is not None:
save_json(__lowercase , __lowercase , indent=__lowercase )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
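# Example CLI usage (hypothetical file paths):
#   python rouge_cli.py predictions.txt references.txt --save_path rouge_scores.json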
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
def __init__( self : str , __snake_case : int , __snake_case : Dict=7 , __snake_case : Any=3 , __snake_case : Dict=18 , __snake_case : Optional[Any]=30 , __snake_case : Tuple=400 , __snake_case : List[str]=True , __snake_case : Tuple=None , __snake_case : Tuple=True , __snake_case : Dict=False , __snake_case : Any=True , __snake_case : Optional[Any]=True , __snake_case : Dict=[0.5, 0.5, 0.5] , __snake_case : int=[0.5, 0.5, 0.5] , ):
'''simple docstring'''
UpperCAmelCase_ : Any = parent
UpperCAmelCase_ : List[str] = batch_size
UpperCAmelCase_ : str = num_channels
UpperCAmelCase_ : Union[str, Any] = image_size
UpperCAmelCase_ : Optional[Any] = min_resolution
UpperCAmelCase_ : Union[str, Any] = max_resolution
UpperCAmelCase_ : Dict = do_resize
UpperCAmelCase_ : Optional[Any] = size if size is not None else {'''height''': 18, '''width''': 20}
UpperCAmelCase_ : Dict = do_thumbnail
UpperCAmelCase_ : int = do_align_axis
UpperCAmelCase_ : Dict = do_pad
UpperCAmelCase_ : Optional[int] = do_normalize
UpperCAmelCase_ : List[str] = image_mean
UpperCAmelCase_ : Any = image_std
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class lowerCAmelCase__( snake_case__ , unittest.TestCase ):
'''simple docstring'''
A_ : str = DonutImageProcessor if is_vision_available() else None
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = DonutImageProcessingTester(self )
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__snake_case , '''do_resize''' ) )
self.assertTrue(hasattr(__snake_case , '''size''' ) )
self.assertTrue(hasattr(__snake_case , '''do_thumbnail''' ) )
self.assertTrue(hasattr(__snake_case , '''do_align_long_axis''' ) )
self.assertTrue(hasattr(__snake_case , '''do_pad''' ) )
self.assertTrue(hasattr(__snake_case , '''do_normalize''' ) )
self.assertTrue(hasattr(__snake_case , '''image_mean''' ) )
self.assertTrue(hasattr(__snake_case , '''image_std''' ) )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 20} )
UpperCAmelCase_ : int = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
# Previous config had dimensions in (width, height) order
UpperCAmelCase_ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'''height''': 84, '''width''': 42} )
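# i.e. a tuple passed as ``size`` is interpreted as (width, height) for backward
# compatibility and stored in the canonical {"height", "width"} form.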
def _lowerCamelCase ( self : str ):
'''simple docstring'''
pass
@is_flaky()
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
# Initialize image_processing
UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , Image.Image )
# Test not batched input
UpperCAmelCase_ : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
UpperCAmelCase_ : Optional[Any] = image_processing(__snake_case , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
# Initialize image_processing
UpperCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , numpify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , np.ndarray )
# Test not batched input
UpperCAmelCase_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
UpperCAmelCase_ : str = image_processing(__snake_case , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def _lowerCamelCase ( self : str ):
'''simple docstring'''
# Initialize image_processing
UpperCAmelCase_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , torchify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , torch.Tensor )
# Test not batched input
UpperCAmelCase_ : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
UpperCAmelCase_ : str = image_processing(__snake_case , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self : int , __snake_case : List[Any] , __snake_case : List[Any]=13 , __snake_case : str=7 , __snake_case : Union[str, Any]=True , __snake_case : List[Any]=True , __snake_case : str=True , __snake_case : Optional[int]=True , __snake_case : Optional[int]=True , __snake_case : List[str]=False , __snake_case : List[str]=False , __snake_case : Tuple=False , __snake_case : List[str]=2 , __snake_case : Optional[int]=99 , __snake_case : Tuple=0 , __snake_case : int=32 , __snake_case : Optional[int]=5 , __snake_case : str=4 , __snake_case : str=0.1 , __snake_case : Optional[int]=0.1 , __snake_case : List[str]=512 , __snake_case : Tuple=2 , __snake_case : List[Any]=0.02 , __snake_case : Any=2 , __snake_case : Optional[int]=4 , __snake_case : Optional[Any]="last" , __snake_case : Dict=True , __snake_case : Any=None , __snake_case : str=0 , ):
'''simple docstring'''
UpperCAmelCase_ : int = parent
UpperCAmelCase_ : Optional[Any] = batch_size
UpperCAmelCase_ : Union[str, Any] = seq_length
UpperCAmelCase_ : List[Any] = is_training
UpperCAmelCase_ : List[Any] = use_input_lengths
UpperCAmelCase_ : Dict = use_token_type_ids
UpperCAmelCase_ : Union[str, Any] = use_labels
UpperCAmelCase_ : int = gelu_activation
UpperCAmelCase_ : str = sinusoidal_embeddings
UpperCAmelCase_ : List[str] = causal
UpperCAmelCase_ : Tuple = asm
UpperCAmelCase_ : List[Any] = n_langs
UpperCAmelCase_ : Union[str, Any] = vocab_size
UpperCAmelCase_ : Any = n_special
UpperCAmelCase_ : Dict = hidden_size
UpperCAmelCase_ : Dict = num_hidden_layers
UpperCAmelCase_ : str = num_attention_heads
UpperCAmelCase_ : List[str] = hidden_dropout_prob
UpperCAmelCase_ : Any = attention_probs_dropout_prob
UpperCAmelCase_ : Tuple = max_position_embeddings
UpperCAmelCase_ : Union[str, Any] = type_sequence_label_size
UpperCAmelCase_ : Union[str, Any] = initializer_range
UpperCAmelCase_ : Tuple = num_labels
UpperCAmelCase_ : List[Any] = num_choices
UpperCAmelCase_ : Any = summary_type
UpperCAmelCase_ : Optional[int] = use_proj
UpperCAmelCase_ : List[str] = scope
UpperCAmelCase_ : List[str] = bos_token_id
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : Tuple = None
if self.use_input_lengths:
UpperCAmelCase_ : List[str] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
UpperCAmelCase_ : int = None
if self.use_token_type_ids:
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : str = None
if self.use_labels:
UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ : List[Any] = ids_tensor([self.batch_size] , 2 ).float()
UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ : List[Any] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def _lowerCamelCase ( self : Optional[Any] , __snake_case : int , __snake_case : int , __snake_case : List[Any] , __snake_case : List[Any] , __snake_case : List[str] , __snake_case : List[str] , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Dict , ):
'''simple docstring'''
UpperCAmelCase_ : Any = XLMModel(config=__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Optional[int] = model(__snake_case , lengths=__snake_case , langs=__snake_case )
UpperCAmelCase_ : Any = model(__snake_case , langs=__snake_case )
UpperCAmelCase_ : Any = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self : str , __snake_case : Dict , __snake_case : Any , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : str , __snake_case : Tuple , __snake_case : Union[str, Any] , __snake_case : int , ):
'''simple docstring'''
UpperCAmelCase_ : Any = XLMWithLMHeadModel(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self : Optional[int] , __snake_case : Tuple , __snake_case : Any , __snake_case : Tuple , __snake_case : List[str] , __snake_case : Optional[int] , __snake_case : List[str] , __snake_case : Optional[Any] , __snake_case : str , __snake_case : List[str] , ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = XLMForQuestionAnsweringSimple(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Optional[int] = model(__snake_case )
UpperCAmelCase_ : Any = model(__snake_case , start_positions=__snake_case , end_positions=__snake_case )
UpperCAmelCase_ : Optional[Any] = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self : Any , __snake_case : int , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : Tuple , __snake_case : str , __snake_case : Tuple , __snake_case : int , ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = XLMForQuestionAnswering(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(__snake_case )
UpperCAmelCase_ : List[str] = model(
__snake_case , start_positions=__snake_case , end_positions=__snake_case , cls_index=__snake_case , is_impossible=__snake_case , p_mask=__snake_case , )
UpperCAmelCase_ : Optional[Any] = model(
__snake_case , start_positions=__snake_case , end_positions=__snake_case , cls_index=__snake_case , is_impossible=__snake_case , )
((UpperCAmelCase_) , ) : Union[str, Any] = result_with_labels.to_tuple()
UpperCAmelCase_ : Optional[int] = model(__snake_case , start_positions=__snake_case , end_positions=__snake_case )
((UpperCAmelCase_) , ) : str = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def _lowerCamelCase ( self : str , __snake_case : Dict , __snake_case : str , __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : Optional[int] , __snake_case : Dict , __snake_case : List[str] , __snake_case : Optional[Any] , __snake_case : Any , ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = XLMForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(__snake_case )
UpperCAmelCase_ : Optional[int] = model(__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowerCamelCase ( self : Any , __snake_case : Union[str, Any] , __snake_case : Any , __snake_case : List[str] , __snake_case : Optional[Any] , __snake_case : Tuple , __snake_case : str , __snake_case : Optional[int] , __snake_case : Tuple , __snake_case : int , ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = self.num_labels
UpperCAmelCase_ : Optional[int] = XLMForTokenClassification(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : List[str] = model(__snake_case , attention_mask=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self : List[Any] , __snake_case : str , __snake_case : List[str] , __snake_case : Any , __snake_case : Tuple , __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : Dict , __snake_case : Optional[int] , ):
'''simple docstring'''
UpperCAmelCase_ : int = self.num_choices
UpperCAmelCase_ : int = XLMForMultipleChoice(config=__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ : Any = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = self.prepare_config_and_inputs()
(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) : Union[str, Any] = config_and_inputs
UpperCAmelCase_ : str = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths}
return config, inputs_dict
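# The dict above deliberately feeds ``lengths`` rather than an attention mask;
# XLM derives its padding mask internally from the per-sample lengths.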
@require_torch
class lowerCAmelCase__( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase ):
'''simple docstring'''
A_ : Dict = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
A_ : List[str] = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
A_ : Optional[int] = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def _lowerCamelCase ( self : str , __snake_case : Optional[int] , __snake_case : Optional[Any] , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Optional[Any] ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _lowerCamelCase ( self : Union[str, Any] , __snake_case : Any , __snake_case : List[Any] , __snake_case : str=False ):
'''simple docstring'''
UpperCAmelCase_ : int = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
UpperCAmelCase_ : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case )
UpperCAmelCase_ : Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case )
return inputs_dict
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = XLMModelTester(self )
UpperCAmelCase_ : List[Any] = ConfigTester(self , config_class=__snake_case , emb_dim=37 )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*__snake_case )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*__snake_case )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*__snake_case )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*__snake_case )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*__snake_case )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*__snake_case )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*__snake_case )
def _lowerCamelCase ( self : str , __snake_case : str , __snake_case : Union[str, Any] , __snake_case : List[Any] , __snake_case : str , __snake_case : Optional[int] , __snake_case : Union[str, Any]=False , __snake_case : Optional[Any]=1 ):
'''simple docstring'''
self.assertIsInstance(__snake_case , __snake_case )
self.assertListEqual(
[isinstance(__snake_case , __snake_case ) for iter_attentions in attentions] , [True] * len(__snake_case ) )
self.assertEqual(len(__snake_case ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(__snake_case ):
# adds PAD dummy token
UpperCAmelCase_ : Dict = min_length + idx + 1
UpperCAmelCase_ : List[Any] = min_length + idx + 1
UpperCAmelCase_ : Optional[int] = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(__snake_case ) )
def _lowerCamelCase ( self : List[Any] , __snake_case : List[str] , __snake_case : int , __snake_case : Optional[int] , __snake_case : Dict , __snake_case : Any , __snake_case : Optional[Any]=False , __snake_case : str=1 ):
'''simple docstring'''
self.assertIsInstance(__snake_case , __snake_case )
self.assertListEqual(
[isinstance(__snake_case , __snake_case ) for iter_hidden_states in hidden_states] , [True] * len(__snake_case ) , )
self.assertEqual(len(__snake_case ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(__snake_case ):
# adds PAD dummy token
UpperCAmelCase_ : str = min_length + idx + 1
UpperCAmelCase_ : int = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(__snake_case ) , )
pass
@slow
def _lowerCamelCase ( self : int ):
'''simple docstring'''
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Any = XLMModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@require_torch
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = XLMWithLMHeadModel.from_pretrained('''xlm-mlm-en-2048''' )
model.to(__snake_case )
UpperCAmelCase_ : str = torch.tensor([[14, 447]] , dtype=torch.long , device=__snake_case ) # the president
UpperCAmelCase_ : Union[str, Any] = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
UpperCAmelCase_ : Dict = model.generate(__snake_case , do_sample=__snake_case )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , __snake_case )
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : str = (IPNDMScheduler,)
A_ : Any = (('num_inference_steps', 5_0),)
def _lowerCamelCase ( self : Any , **__snake_case : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = {'''num_train_timesteps''': 1_000}
config.update(**__snake_case )
return config
def _lowerCamelCase ( self : Optional[Any] , __snake_case : str=0 , **__snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : Dict = dict(self.forward_default_kwargs )
UpperCAmelCase_ : str = kwargs.pop('''num_inference_steps''' , __snake_case )
UpperCAmelCase_ : Optional[int] = self.dummy_sample
UpperCAmelCase_ : Optional[int] = 0.1 * sample
UpperCAmelCase_ : int = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : Optional[Any] = self.get_scheduler_config(**__snake_case )
UpperCAmelCase_ : int = scheduler_class(**__snake_case )
scheduler.set_timesteps(__snake_case )
# copy over dummy past residuals
UpperCAmelCase_ : List[str] = dummy_past_residuals[:]
if time_step is None:
UpperCAmelCase_ : List[str] = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__snake_case )
UpperCAmelCase_ : Tuple = scheduler_class.from_pretrained(__snake_case )
new_scheduler.set_timesteps(__snake_case )
# copy over dummy past residuals
UpperCAmelCase_ : str = dummy_past_residuals[:]
UpperCAmelCase_ : List[Any] = scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
UpperCAmelCase_ : List[str] = new_scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase_ : Union[str, Any] = scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
UpperCAmelCase_ : List[Any] = new_scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Any , __snake_case : List[Any]=0 , **__snake_case : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = dict(self.forward_default_kwargs )
UpperCAmelCase_ : List[str] = kwargs.pop('''num_inference_steps''' , __snake_case )
UpperCAmelCase_ : int = self.dummy_sample
UpperCAmelCase_ : Tuple = 0.1 * sample
UpperCAmelCase_ : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : int = self.get_scheduler_config()
UpperCAmelCase_ : Union[str, Any] = scheduler_class(**__snake_case )
scheduler.set_timesteps(__snake_case )
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase_ : Optional[Any] = dummy_past_residuals[:]
if time_step is None:
UpperCAmelCase_ : List[str] = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__snake_case )
UpperCAmelCase_ : int = scheduler_class.from_pretrained(__snake_case )
# copy over dummy past residuals
new_scheduler.set_timesteps(__snake_case )
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase_ : Optional[Any] = dummy_past_residuals[:]
UpperCAmelCase_ : Dict = scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
UpperCAmelCase_ : str = new_scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase_ : int = scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
UpperCAmelCase_ : List[str] = new_scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _lowerCamelCase ( self : Optional[int] , **__snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = self.scheduler_classes[0]
UpperCAmelCase_ : Optional[Any] = self.get_scheduler_config(**__snake_case )
UpperCAmelCase_ : Any = scheduler_class(**__snake_case )
UpperCAmelCase_ : Any = 10
UpperCAmelCase_ : int = self.dummy_model()
UpperCAmelCase_ : Any = self.dummy_sample_deter
scheduler.set_timesteps(__snake_case )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase_ : str = model(__snake_case , __snake_case )
UpperCAmelCase_ : Any = scheduler.step(__snake_case , __snake_case , __snake_case ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase_ : Optional[Any] = model(__snake_case , __snake_case )
UpperCAmelCase_ : List[Any] = scheduler.step(__snake_case , __snake_case , __snake_case ).prev_sample
return sample
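# Note: the loop above runs over the timesteps twice, matching the reference test;
# presumably this exercises the scheduler's accumulated residual history (``ets``).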
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ : Dict = dict(self.forward_default_kwargs )
UpperCAmelCase_ : Any = kwargs.pop('''num_inference_steps''' , __snake_case )
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : Optional[Any] = self.get_scheduler_config()
UpperCAmelCase_ : Tuple = scheduler_class(**__snake_case )
UpperCAmelCase_ : int = self.dummy_sample
UpperCAmelCase_ : int = 0.1 * sample
if num_inference_steps is not None and hasattr(__snake_case , '''set_timesteps''' ):
scheduler.set_timesteps(__snake_case )
elif num_inference_steps is not None and not hasattr(__snake_case , '''set_timesteps''' ):
UpperCAmelCase_ : Optional[Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase_ : int = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
UpperCAmelCase_ : Any = dummy_past_residuals[:]
UpperCAmelCase_ : List[str] = scheduler.timesteps[5]
UpperCAmelCase_ : str = scheduler.timesteps[6]
UpperCAmelCase_ : List[str] = scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
UpperCAmelCase_ : Any = scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
UpperCAmelCase_ : Tuple = scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
UpperCAmelCase_ : Optional[int] = scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
for timesteps in [100, 1_000]:
self.check_over_configs(num_train_timesteps=__snake_case , time_step=__snake_case )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=__snake_case , time_step=__snake_case )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : int = self.full_loop()
UpperCAmelCase_ : int = torch.mean(torch.abs(__snake_case ) )
assert abs(result_mean.item() - 2_540_529 ) < 10
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__UpperCamelCase : Dict = R'\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `" / "`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `" // "`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `"wiki_dpr"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `"train"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `"compressed"`)\n The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and\n `"compressed"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a "dummy" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n'
@add_start_docstrings(snake_case__ )
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Union[str, Any] = 'rag'
A_ : Tuple = True
def __init__( self : int , __snake_case : List[str]=None , __snake_case : List[Any]=True , __snake_case : Optional[int]=None , __snake_case : Union[str, Any]=None , __snake_case : int=None , __snake_case : Any=None , __snake_case : Optional[int]=None , __snake_case : Optional[int]=" / " , __snake_case : Any=" // " , __snake_case : Tuple=5 , __snake_case : Union[str, Any]=300 , __snake_case : Any=768 , __snake_case : Tuple=8 , __snake_case : int="wiki_dpr" , __snake_case : Optional[int]="train" , __snake_case : Tuple="compressed" , __snake_case : Optional[int]=None , __snake_case : List[Any]=None , __snake_case : Optional[int]=False , __snake_case : str=False , __snake_case : Dict=0.0 , __snake_case : Union[str, Any]=True , __snake_case : Union[str, Any]=False , __snake_case : str=False , __snake_case : str=False , __snake_case : Optional[Any]=True , __snake_case : int=None , **__snake_case : str , ):
'''simple docstring'''
super().__init__(
bos_token_id=__snake_case , pad_token_id=__snake_case , eos_token_id=__snake_case , decoder_start_token_id=__snake_case , forced_eos_token_id=__snake_case , is_encoder_decoder=__snake_case , prefix=__snake_case , vocab_size=__snake_case , **__snake_case , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
UpperCAmelCase_ : Dict = kwargs.pop('''question_encoder''' )
UpperCAmelCase_ : List[Any] = question_encoder_config.pop('''model_type''' )
UpperCAmelCase_ : Any = kwargs.pop('''generator''' )
UpperCAmelCase_ : Union[str, Any] = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
UpperCAmelCase_ : Any = AutoConfig.for_model(__snake_case , **__snake_case )
UpperCAmelCase_ : int = AutoConfig.for_model(__snake_case , **__snake_case )
UpperCAmelCase_ : Any = reduce_loss
UpperCAmelCase_ : List[Any] = label_smoothing
UpperCAmelCase_ : Tuple = exclude_bos_score
UpperCAmelCase_ : int = do_marginalize
UpperCAmelCase_ : Tuple = title_sep
UpperCAmelCase_ : Union[str, Any] = doc_sep
UpperCAmelCase_ : Any = n_docs
UpperCAmelCase_ : Optional[int] = max_combined_length
UpperCAmelCase_ : Any = dataset
UpperCAmelCase_ : List[Any] = dataset_split
UpperCAmelCase_ : Union[str, Any] = index_name
UpperCAmelCase_ : List[str] = retrieval_vector_size
UpperCAmelCase_ : Optional[Any] = retrieval_batch_size
UpperCAmelCase_ : Optional[int] = passages_path
UpperCAmelCase_ : Optional[Any] = index_path
UpperCAmelCase_ : List[Any] = use_dummy_dataset
UpperCAmelCase_ : int = output_retrieved
UpperCAmelCase_ : int = do_deduplication
UpperCAmelCase_ : Optional[int] = use_cache
if self.forced_eos_token_id is None:
UpperCAmelCase_ : int = getattr(self.generator , '''forced_eos_token_id''' , __snake_case )
@classmethod
def _lowerCamelCase ( cls : Optional[Any] , __snake_case : PretrainedConfig , __snake_case : PretrainedConfig , **__snake_case : str ):
'''simple docstring'''
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **__snake_case )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = copy.deepcopy(self.__dict__ )
UpperCAmelCase_ : Optional[int] = self.question_encoder.to_dict()
UpperCAmelCase_ : Dict = self.generator.to_dict()
UpperCAmelCase_ : Optional[Any] = self.__class__.model_type
return output
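# A RagConfig therefore always nests two sub-configs; the classmethod above (named
# ``from_question_encoder_generator_configs`` in the original API) is the usual way to
# compose them from existing question-encoder and generator configs.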
import cv2
import numpy as np
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self : Dict , __snake_case : float , __snake_case : int ):
'''simple docstring'''
if k in (0.04, 0.06):
UpperCAmelCase_ : Optional[int] = k
UpperCAmelCase_ : int = window_size
else:
raise ValueError('''invalid k value''' )
def __str__( self : str ):
'''simple docstring'''
return str(self.k )
def _lowerCamelCase ( self : Optional[int] , __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = cv2.imread(__snake_case , 0 )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = img.shape
UpperCAmelCase_ : list[list[int]] = []
UpperCAmelCase_ : Union[str, Any] = img.copy()
UpperCAmelCase_ : str = cv2.cvtColor(__snake_case , cv2.COLOR_GRAY2RGB )
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = np.gradient(__snake_case )
UpperCAmelCase_ : Union[str, Any] = dx**2
UpperCAmelCase_ : Optional[int] = dy**2
UpperCAmelCase_ : Optional[int] = dx * dy
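# ixx, iyy and ixy are the entries of the Harris structure tensor
# M = [[ixx, ixy], [ixy, iyy]]; summing them over the window below gives wxx, wyy and
# wxy, from which the corner response is r = det(M) - k * trace(M)**2.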
UpperCAmelCase_ : Dict = 0.04
UpperCAmelCase_ : Tuple = self.window_size // 2
for y in range(__snake_case , h - offset ):
for x in range(__snake_case , w - offset ):
UpperCAmelCase_ : int = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCAmelCase_ : List[str] = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCAmelCase_ : Tuple = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCAmelCase_ : Dict = (wxx * wyy) - (wxy**2)
UpperCAmelCase_ : str = wxx + wyy
UpperCAmelCase_ : str = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 255 )
return color_img, corner_list
if __name__ == "__main__":
__UpperCamelCase : int = HarrisCorner(0.04, 3)
__UpperCamelCase , __UpperCamelCase : Optional[Any] = edge_detect.detect('path_to_image')
cva.imwrite('detect.png', color_img)
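    # Added cross-check (illustrative, commented out so the script's behaviour is
    # unchanged): OpenCV ships a vectorized Harris detector. The blockSize/ksize
    # values below are assumptions, not taken from the original.
    # gray = np.float32(cv2.imread("path_to_image", 0))
    # response = cv2.cornerHarris(gray, 3, 3, 0.04)  # (src, blockSize, ksize, k)
    # print(int((response > 0.01 * response.max()).sum()), "corner candidates")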
| 641
|
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    benchmark.run()
if __name__ == "__main__":
main()
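    # Added sketch (commented out): the same benchmark invoked programmatically
    # instead of through the CLI; the model name and sizes are illustrative.
    # args = TensorFlowBenchmarkArguments(
    #     models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128]
    # )
    # TensorFlowBenchmark(args=args).run()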
| 641
| 1
|
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def _get_single_answer(example):
    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a

    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]
    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)

    # disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True
    else:
        answer["remove_it"] = False

    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError("Issue in ID", example["id"])

    return answer
def get_context_and_ans(example, assertion=False):
    answer = _get_single_answer(example)
    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]

    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        doc = example["document"]["tokens"]
        context = []
        for i in range(len(doc["token"])):
            if not doc["is_html"][i]:
                context.append(doc["token"][i])
        return {
            "context": " ".join(context),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }

    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }

    # handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10

    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    context = []
    for i in range(len(doc["token"])):
        if not doc["is_html"][i]:
            context.append(doc["token"][i])
        else:
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = " ".join(context[start_token:end_token])

    # checking above code
    if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print("ID:", example["id"])
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")

    return {
        "context": " ".join(context),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }
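# Added illustration: a hypothetical helper (not part of the original script)
# mirroring the loop above, showing how the answer indices shift left as HTML
# tokens are dropped from the context.
def _demo_shift_tokens(tokens, is_html, start_token, end_token):
    context = []
    for i, tok in enumerate(tokens):
        if not is_html[i]:
            context.append(tok)
        else:
            if start_token > i:
                start_token -= 1
            if end_token > i:
                end_token -= 1
    return " ".join(context[start_token:end_token]), start_token, end_token


# e.g. _demo_shift_tokens(["<p>", "a", "b", "</p>"], [True, False, False, True], 1, 3)
# returns ("a b", 0, 2): both indices move left past the dropped "<p>" tag.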
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
    # overlap will be of doc_stride - q_len
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]

    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }

    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1

    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)
        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice = input_ids[i:end_index]
            inputs.append(q_indices + slice)
            category.append(answer["category"][0])
            if slice[-1] == tokenizer.sep_token_id:
                break
        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(inputs),
                "end_token": [-100] * len(inputs),
                "category": category,
            },
        }

    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    answer["start_token"] = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]]),
            add_special_tokens=False,
        ).input_ids
    )
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids
    )

    answer["start_token"] += q_len
    answer["end_token"] += q_len

    # fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1

    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    if assertion:
        # this won't always match exactly: some characters are replaced by the tokenizer
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")

    if len(input_ids) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }

    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice = input_ids[i:end_index]
        inputs.append(q_indices + slice)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"

        if start_token >= i and end_token <= end_index - 1:
            start_token = start_token - i + q_len
            end_token = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            start_token = -100
            end_token = -100
            answers_category.append("null")
        new = inputs[-1][start_token : end_token + 1]

        answers_start_token.append(start_token)
        answers_end_token.append(end_token)
        if assertion:
            # checking if the striding above is working as expected
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if slice[-1] == tokenizer.sep_token_id:
            break

    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }
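# Added illustration: a hypothetical helper (not in the original) isolating the
# striding arithmetic above. Windows start every max_length - doc_stride tokens
# and each carries max_length - q_len document tokens, so consecutive windows
# overlap by doc_stride - q_len tokens, matching the comment at the top of
# get_strided_contexts_and_ans.
def _demo_windows(input_ids, q_len, max_length, doc_stride):
    q_indices = input_ids[:q_len]
    return [
        q_indices + input_ids[i : i + max_length - q_len]
        for i in range(q_len, len(input_ids), max_length - doc_stride)
    ]


# e.g. with q_len=4, max_length=10, doc_stride=6, windows start at 4, 8, 12, ...
# and neighbouring windows share doc_stride - q_len = 2 document tokens.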
def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    example = get_strided_contexts_and_ans(
        example,
        tokenizer,
        doc_stride=doc_stride,
        max_length=max_length,
        assertion=assertion,
    )
    return example


def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"],
                labels["start_token"],
                labels["end_token"],
                labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # drop ~60% of the no-answer samples
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
    data = load_dataset("natural_questions")
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    data = data["train" if PROCESS_TRAIN == "true" else "validation"]

    fn_kwargs = {
        "tokenizer": tokenizer,
        "doc_stride": DOC_STRIDE,
        "max_length": MAX_LENGTH,
        "assertion": False,
    }
    data = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    data = data.remove_columns(["annotations", "document", "id", "question"])
    print(data)

    np.random.seed(SEED)
    cache_file_name = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
    save_to_disk(data, file_name=cache_file_name)
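    # Added sketch (commented out): reading the cached file back, e.g. for a
    # quick sanity check before training.
    # with jsonlines.open(cache_file_name) as reader:
    #     sample = next(iter(reader))
    #     assert set(sample) == {"input_ids", "start_token", "end_token", "category"}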
| 641
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : str = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : List[Any] = 'unispeech-sat'
def __init__( self : int , __snake_case : Optional[int]=32 , __snake_case : Dict=768 , __snake_case : Optional[Any]=12 , __snake_case : Optional[int]=12 , __snake_case : Dict=3_072 , __snake_case : List[str]="gelu" , __snake_case : Any=0.1 , __snake_case : Tuple=0.1 , __snake_case : Optional[int]=0.1 , __snake_case : Tuple=0.0 , __snake_case : List[Any]=0.0 , __snake_case : Tuple=0.1 , __snake_case : Union[str, Any]=0.1 , __snake_case : Dict=0.02 , __snake_case : Optional[Any]=1E-5 , __snake_case : Optional[int]="group" , __snake_case : str="gelu" , __snake_case : Union[str, Any]=(512, 512, 512, 512, 512, 512, 512) , __snake_case : str=(5, 2, 2, 2, 2, 2, 2) , __snake_case : Tuple=(10, 3, 3, 3, 3, 2, 2) , __snake_case : int=False , __snake_case : Optional[int]=128 , __snake_case : Any=16 , __snake_case : Union[str, Any]=False , __snake_case : Union[str, Any]=True , __snake_case : List[Any]=0.05 , __snake_case : Dict=10 , __snake_case : int=2 , __snake_case : Optional[Any]=0.0 , __snake_case : Optional[int]=10 , __snake_case : List[Any]=0 , __snake_case : Optional[int]=320 , __snake_case : int=2 , __snake_case : Any=0.1 , __snake_case : Optional[int]=100 , __snake_case : Tuple=256 , __snake_case : List[str]=256 , __snake_case : List[Any]=0.1 , __snake_case : Tuple="mean" , __snake_case : List[Any]=False , __snake_case : List[str]=False , __snake_case : Optional[Any]=256 , __snake_case : Tuple=(512, 512, 512, 512, 1_500) , __snake_case : Optional[int]=(5, 3, 3, 1, 1) , __snake_case : Any=(1, 2, 3, 1, 1) , __snake_case : int=512 , __snake_case : Optional[int]=0 , __snake_case : Dict=1 , __snake_case : Tuple=2 , __snake_case : Union[str, Any]=504 , **__snake_case : List[str] , ):
'''simple docstring'''
super().__init__(**__snake_case , pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case )
UpperCAmelCase_ : Union[str, Any] = hidden_size
UpperCAmelCase_ : int = feat_extract_norm
UpperCAmelCase_ : Dict = feat_extract_activation
UpperCAmelCase_ : Union[str, Any] = list(__snake_case )
UpperCAmelCase_ : List[str] = list(__snake_case )
UpperCAmelCase_ : Any = list(__snake_case )
UpperCAmelCase_ : Any = conv_bias
UpperCAmelCase_ : List[str] = num_conv_pos_embeddings
UpperCAmelCase_ : Dict = num_conv_pos_embedding_groups
UpperCAmelCase_ : Optional[int] = len(self.conv_dim )
UpperCAmelCase_ : List[str] = num_hidden_layers
UpperCAmelCase_ : Dict = intermediate_size
UpperCAmelCase_ : Union[str, Any] = hidden_act
UpperCAmelCase_ : str = num_attention_heads
UpperCAmelCase_ : Union[str, Any] = hidden_dropout
UpperCAmelCase_ : List[str] = attention_dropout
UpperCAmelCase_ : Optional[Any] = activation_dropout
UpperCAmelCase_ : Dict = feat_proj_dropout
UpperCAmelCase_ : Optional[Any] = final_dropout
UpperCAmelCase_ : List[Any] = layerdrop
UpperCAmelCase_ : int = layer_norm_eps
UpperCAmelCase_ : int = initializer_range
UpperCAmelCase_ : List[Any] = vocab_size
UpperCAmelCase_ : int = num_clusters
UpperCAmelCase_ : int = do_stable_layer_norm
UpperCAmelCase_ : Any = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase_ : int = apply_spec_augment
UpperCAmelCase_ : Optional[Any] = mask_time_prob
UpperCAmelCase_ : str = mask_time_length
UpperCAmelCase_ : Any = mask_time_min_masks
UpperCAmelCase_ : str = mask_feature_prob
UpperCAmelCase_ : str = mask_feature_length
UpperCAmelCase_ : Tuple = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
UpperCAmelCase_ : Optional[int] = num_codevectors_per_group
UpperCAmelCase_ : int = num_codevector_groups
UpperCAmelCase_ : List[str] = contrastive_logits_temperature
UpperCAmelCase_ : int = feat_quantizer_dropout
UpperCAmelCase_ : List[str] = num_negatives
UpperCAmelCase_ : Any = codevector_dim
UpperCAmelCase_ : Tuple = proj_codevector_dim
UpperCAmelCase_ : Union[str, Any] = diversity_loss_weight
# ctc loss
UpperCAmelCase_ : Any = ctc_loss_reduction
UpperCAmelCase_ : Optional[Any] = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCAmelCase_ : Optional[int] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase_ : Dict = list(__snake_case )
UpperCAmelCase_ : Dict = list(__snake_case )
UpperCAmelCase_ : Dict = list(__snake_case )
UpperCAmelCase_ : Union[str, Any] = xvector_output_dim
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
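# Added note: with the default conv_stride of (5, 2, 2, 2, 2, 2, 2), the property
# above multiplies the strides together, so each output frame of the feature
# extractor covers 5 * 2**6 = 320 input samples (20 ms of 16 kHz audio):
#
#   functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1)  # -> 320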
| 641
| 1
|
def solution(numerator: int = 1, digit: int = 1000) -> int:
    """
    Find the denominator d <= `digit` for which numerator/d has the longest
    recurring cycle in its decimal fraction part (Project Euler style).
    """
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
            now_divide = now_divide * 10 % divide_by_number
    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
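    # Added check (illustrative): among denominators below 10, 1/7 has the
    # longest recurring cycle (six digits, 142857), so the search returns 7.
    assert solution(1, 10) == 7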
| 641
|
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
if __name__ == "__main__":
__UpperCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__UpperCamelCase : Dict = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
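    # Added illustration of the vocab transform above: word-internal BPE pieces
    # carry "@@" and lose it, while word-final pieces gain the "</w>" marker
    # (indices <= 13 are reserved special symbols). Toy vocab, pure check:
    demo_vocab = {"hel@@": 14, "lo": 15}
    assert {
        s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i
        for s, i in demo_vocab.items()
    } == {"hel": 14, "lo</w>": 15}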
| 641
| 1
|
def dodecahedron_surface_area(edge: float) -> float:
    """Return the surface area of a regular dodecahedron with the given edge."""
    if edge <= 0 or not isinstance(edge, (int, float)):
        raise ValueError("Length must be positive.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    """Return the volume of a regular dodecahedron with the given edge."""
    if edge <= 0 or not isinstance(edge, (int, float)):
        raise ValueError("Length must be positive.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
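    # Added numeric check: for a unit edge the closed forms above give the
    # standard regular-dodecahedron values, area 3*sqrt(25 + 10*sqrt(5)) ~= 20.6457
    # and volume (15 + 7*sqrt(5))/4 ~= 7.6631.
    assert abs(dodecahedron_surface_area(1) - 20.6457) < 1e-3
    assert abs(dodecahedron_volume(1) - 7.6631) < 1e-3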
| 641
|
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
__UpperCamelCase : Optional[int] = None
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : List[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__UpperCamelCase : Optional[int] = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
__UpperCamelCase : Optional[Any] = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Optional[int] = VOCAB_FILES_NAMES
A_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
A_ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ : str = ['input_ids', 'attention_mask']
A_ : int = TaTokenizer
A_ : List[int] = []
def __init__( self : Union[str, Any] , __snake_case : Tuple=None , __snake_case : List[Any]=None , __snake_case : int="</s>" , __snake_case : List[Any]="<unk>" , __snake_case : Dict="<pad>" , __snake_case : Tuple=100 , __snake_case : int=None , **__snake_case : Any , ):
'''simple docstring'''
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
UpperCAmelCase_ : Optional[int] = [f'''<extra_id_{i}>''' for i in range(__snake_case )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
UpperCAmelCase_ : Any = len(set(filter(lambda __snake_case : bool('''extra_id_''' in str(__snake_case ) ) , __snake_case ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
super().__init__(
__snake_case , tokenizer_file=__snake_case , eos_token=__snake_case , unk_token=__snake_case , pad_token=__snake_case , extra_ids=__snake_case , additional_special_tokens=__snake_case , **__snake_case , )
UpperCAmelCase_ : str = vocab_file
UpperCAmelCase_ : List[str] = False if not self.vocab_file else True
UpperCAmelCase_ : Union[str, Any] = extra_ids
@staticmethod
def _lowerCamelCase ( __snake_case : Dict , __snake_case : List[str] , __snake_case : Tuple ):
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
UpperCAmelCase_ : str = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f''' {pretrained_model_name_or_path} automatically truncating your input to'''
f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , __snake_case , )
return max_model_length
def _lowerCamelCase ( self : Dict , __snake_case : str , __snake_case : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(__snake_case ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase_ : str = os.path.join(
__snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__snake_case ):
copyfile(self.vocab_file , __snake_case )
logger.info(f'''Copy vocab file to {out_vocab_file}''' )
return (out_vocab_file,)
def _lowerCamelCase ( self : List[str] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
UpperCAmelCase_ : int = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def _lowerCamelCase ( self : Dict , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase_ : int = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return list(
set(filter(lambda __snake_case : bool(re.search(R'''<extra_id_\d+>''' , __snake_case ) ) is not None , self.additional_special_tokens ) ) )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return [self.convert_tokens_to_ids(__snake_case ) for token in self.get_sentinel_tokens()]
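# Added sketch (commented out; assumes this class mirrors
# `transformers.T5TokenizerFast`, whose eos_token_id is 1 for "t5-small"):
# the two sequence-pair methods above append EOS per sequence and mark
# everything as segment 0.
# tok = T5TokenizerFast.from_pretrained("t5-small")
# tok.build_inputs_with_special_tokens([10, 11])        # -> [10, 11, 1]
# tok.build_inputs_with_special_tokens([10, 11], [12])  # -> [10, 11, 1, 12, 1]
# tok.create_token_type_ids_from_sequences([10, 11])    # -> [0, 0, 0]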
| 641
| 1
|
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
if __name__ == "__main__":
__UpperCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__UpperCamelCase : Dict = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 641
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase : str = {
'configuration_x_clip': [
'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XCLIPConfig',
'XCLIPTextConfig',
'XCLIPVisionConfig',
],
'processing_x_clip': ['XCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Any = [
'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'XCLIPModel',
'XCLIPPreTrainedModel',
'XCLIPTextModel',
'XCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
__UpperCamelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 641
| 1
|
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Scrape worldwide COVID-19 statistics from worldometers."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f"{key}\n{value}\n")
| 641
|
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : str = ['image_processor', 'tokenizer']
A_ : int = 'LayoutLMv2ImageProcessor'
A_ : str = ('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast')
def __init__( self : Union[str, Any] , __snake_case : List[Any]=None , __snake_case : List[str]=None , **__snake_case : Optional[int] ):
'''simple docstring'''
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __snake_case , )
UpperCAmelCase_ : List[Any] = kwargs.pop('''feature_extractor''' )
UpperCAmelCase_ : Optional[int] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(__snake_case , __snake_case )
def __call__( self : List[str] , __snake_case : Dict , __snake_case : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __snake_case : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , __snake_case : Union[List[List[int]], List[List[List[int]]]] = None , __snake_case : Optional[Union[List[int], List[List[int]]]] = None , __snake_case : bool = True , __snake_case : Union[bool, str, PaddingStrategy] = False , __snake_case : Union[bool, str, TruncationStrategy] = None , __snake_case : Optional[int] = None , __snake_case : int = 0 , __snake_case : Optional[int] = None , __snake_case : Optional[bool] = None , __snake_case : Optional[bool] = None , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = True , __snake_case : Optional[Union[str, TensorType]] = None , **__snake_case : Optional[int] , ):
'''simple docstring'''
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes '''
'''if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
# first, apply the image processor
UpperCAmelCase_ : Tuple = self.image_processor(images=__snake_case , return_tensors=__snake_case )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(__snake_case , __snake_case ):
UpperCAmelCase_ : Optional[Any] = [text] # add batch dimension (as the image processor always adds a batch dimension)
UpperCAmelCase_ : Any = features['''words''']
UpperCAmelCase_ : str = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=__snake_case , add_special_tokens=__snake_case , padding=__snake_case , truncation=__snake_case , max_length=__snake_case , stride=__snake_case , pad_to_multiple_of=__snake_case , return_token_type_ids=__snake_case , return_attention_mask=__snake_case , return_overflowing_tokens=__snake_case , return_special_tokens_mask=__snake_case , return_offsets_mapping=__snake_case , return_length=__snake_case , verbose=__snake_case , return_tensors=__snake_case , **__snake_case , )
# add pixel values
UpperCAmelCase_ : List[str] = features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
UpperCAmelCase_ : Optional[int] = self.get_overflowing_images(__snake_case , encoded_inputs['''overflow_to_sample_mapping'''] )
UpperCAmelCase_ : List[Any] = images
return encoded_inputs
def _lowerCamelCase ( self : Union[str, Any] , __snake_case : str , __snake_case : List[Any] ):
'''simple docstring'''
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
UpperCAmelCase_ : List[str] = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(__snake_case ) != len(__snake_case ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
f''' {len(__snake_case )} and {len(__snake_case )}''' )
return images_with_overflow
def _lowerCamelCase ( self : List[Any] , *__snake_case : Optional[int] , **__snake_case : List[Any] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*__snake_case , **__snake_case )
def _lowerCamelCase ( self : str , *__snake_case : Optional[Any] , **__snake_case : Union[str, Any] ):
'''simple docstring'''
return self.tokenizer.decode(*__snake_case , **__snake_case )
@property
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __snake_case , )
return self.image_processor_class
@property
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __snake_case , )
return self.image_processor
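# Added usage sketch (commented out; assumes this class mirrors
# `transformers.LayoutXLMProcessor` and that Tesseract is installed for the
# default apply_ocr=True path):
# from transformers import LayoutXLMProcessor
# from PIL import Image
# processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
# encoding = processor(Image.open("form.png").convert("RGB"), return_tensors="pt")
# sorted(encoding.keys())  # expected: attention_mask, bbox, image, input_ids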
| 641
| 1
|
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase__( snake_case__ , unittest.TestCase ):
'''simple docstring'''
A_ : List[Any] = CodeGenTokenizer
A_ : str = CodeGenTokenizerFast
A_ : Any = True
A_ : Optional[Any] = {'add_prefix_space': True}
A_ : Optional[Any] = False
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase_ : List[str] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
'''<|endoftext|>''',
]
UpperCAmelCase_ : Optional[Any] = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
UpperCAmelCase_ : int = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
UpperCAmelCase_ : Optional[Any] = {'''unk_token''': '''<unk>'''}
UpperCAmelCase_ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCAmelCase_ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__snake_case ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__snake_case ) )
def _lowerCamelCase ( self : str , **__snake_case : str ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **__snake_case )
def _lowerCamelCase ( self : str , **__snake_case : Optional[Any] ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **__snake_case )
def _lowerCamelCase ( self : Dict , __snake_case : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = '''lower newer'''
UpperCAmelCase_ : Union[str, Any] = '''lower newer'''
return input_text, output_text
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
UpperCAmelCase_ : str = '''lower newer'''
UpperCAmelCase_ : Any = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
UpperCAmelCase_ : Union[str, Any] = tokenizer.tokenize(__snake_case , add_prefix_space=__snake_case )
self.assertListEqual(__snake_case , __snake_case )
UpperCAmelCase_ : str = tokens + [tokenizer.unk_token]
UpperCAmelCase_ : str = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
UpperCAmelCase_ : Any = self.get_tokenizer()
UpperCAmelCase_ : Optional[int] = self.get_rust_tokenizer(add_prefix_space=__snake_case )
UpperCAmelCase_ : Optional[int] = '''lower newer'''
# Testing tokenization
UpperCAmelCase_ : Dict = tokenizer.tokenize(__snake_case , add_prefix_space=__snake_case )
UpperCAmelCase_ : Union[str, Any] = rust_tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
# Testing conversion to ids without special tokens
UpperCAmelCase_ : Optional[int] = tokenizer.encode(__snake_case , add_special_tokens=__snake_case , add_prefix_space=__snake_case )
UpperCAmelCase_ : List[Any] = rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
self.assertListEqual(__snake_case , __snake_case )
# Testing conversion to ids with special tokens
UpperCAmelCase_ : Union[str, Any] = self.get_rust_tokenizer(add_prefix_space=__snake_case )
UpperCAmelCase_ : Any = tokenizer.encode(__snake_case , add_prefix_space=__snake_case )
UpperCAmelCase_ : str = rust_tokenizer.encode(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
# Testing the unknown token
UpperCAmelCase_ : List[str] = tokens + [rust_tokenizer.unk_token]
UpperCAmelCase_ : str = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case )
def _lowerCamelCase ( self : int , *__snake_case : str , **__snake_case : str ):
'''simple docstring'''
# It's very difficult to mix/test pretokenization with byte-level
# And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def _lowerCamelCase ( self : Dict , __snake_case : Union[str, Any]=15 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCAmelCase_ : int = self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case )
# Simple input
UpperCAmelCase_ : str = '''This is a simple input'''
UpperCAmelCase_ : List[Any] = ['''This is a simple input 1''', '''This is a simple input 2''']
UpperCAmelCase_ : Any = ('''This is a simple input''', '''This is a pair''')
UpperCAmelCase_ : List[str] = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(__snake_case , tokenizer_r.encode , __snake_case , max_length=__snake_case , padding='''max_length''' )
# Simple input
self.assertRaises(__snake_case , tokenizer_r.encode_plus , __snake_case , max_length=__snake_case , padding='''max_length''' )
# Simple input
self.assertRaises(
__snake_case , tokenizer_r.batch_encode_plus , __snake_case , max_length=__snake_case , padding='''max_length''' , )
# Pair input
self.assertRaises(__snake_case , tokenizer_r.encode , __snake_case , max_length=__snake_case , padding='''max_length''' )
# Pair input
self.assertRaises(__snake_case , tokenizer_r.encode_plus , __snake_case , max_length=__snake_case , padding='''max_length''' )
# Pair input
self.assertRaises(
__snake_case , tokenizer_r.batch_encode_plus , __snake_case , max_length=__snake_case , padding='''max_length''' , )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Any = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' )
# Simple input
UpperCAmelCase_ : Optional[int] = '''This is a simple input'''
UpperCAmelCase_ : Optional[Any] = ['''This is a simple input looooooooong''', '''This is a simple input''']
UpperCAmelCase_ : str = ('''This is a simple input''', '''This is a pair''')
UpperCAmelCase_ : List[str] = [
('''This is a simple input loooooong''', '''This is a simple input'''),
('''This is a simple pair loooooong''', '''This is a simple pair'''),
]
UpperCAmelCase_ : int = tokenizer.pad_token_id
UpperCAmelCase_ : List[Any] = tokenizer(__snake_case , padding='''max_length''' , max_length=30 , return_tensors='''np''' )
UpperCAmelCase_ : Any = tokenizer(__snake_case , padding=__snake_case , truncate=__snake_case , return_tensors='''np''' )
UpperCAmelCase_ : List[Any] = tokenizer(*__snake_case , padding='''max_length''' , max_length=60 , return_tensors='''np''' )
UpperCAmelCase_ : Optional[int] = tokenizer(__snake_case , padding=__snake_case , truncate=__snake_case , return_tensors='''np''' )
# s
# test single string max_length padding
self.assertEqual(out_s['''input_ids'''].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['''input_ids'''] )
self.assertTrue(0 in out_s['''attention_mask'''] )
# s2
# test automatic padding
self.assertEqual(out_sa['''input_ids'''].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] )
self.assertFalse(0 in out_sa['''attention_mask'''][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] )
self.assertTrue(0 in out_sa['''attention_mask'''][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['''input_ids'''].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['''input_ids'''] )
self.assertTrue(0 in out_p['''attention_mask'''] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['''input_ids'''].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] )
self.assertFalse(0 in out_pa['''attention_mask'''][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] )
self.assertTrue(0 in out_pa['''attention_mask'''][1] )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : int = '''$$$'''
UpperCAmelCase_ : Tuple = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=__snake_case , add_bos_token=__snake_case )
UpperCAmelCase_ : Optional[int] = '''This is a simple input'''
UpperCAmelCase_ : Union[str, Any] = ['''This is a simple input 1''', '''This is a simple input 2''']
UpperCAmelCase_ : Optional[Any] = tokenizer.bos_token_id
UpperCAmelCase_ : Dict = tokenizer(__snake_case )
UpperCAmelCase_ : Optional[int] = tokenizer(__snake_case )
self.assertEqual(out_s.input_ids[0] , __snake_case )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
UpperCAmelCase_ : str = tokenizer.decode(out_s.input_ids )
UpperCAmelCase_ : List[str] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , __snake_case )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ : Any = CodeGenTokenizer.from_pretrained('''Salesforce/codegen-350M-mono''' )
UpperCAmelCase_ : int = '''\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#'''
UpperCAmelCase_ : Tuple = '''\nif len_a > len_b: result = a\nelse: result = b'''
UpperCAmelCase_ : List[Any] = tokenizer.encode(__snake_case )
UpperCAmelCase_ : str = ['''^#''', re.escape('''<|endoftext|>''' ), '''^\'\'\'''', '''^"""''', '''\n\n\n''']
UpperCAmelCase_ : str = tokenizer.decode(__snake_case , truncate_before_pattern=__snake_case )
self.assertEqual(__snake_case , __snake_case )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
pass
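# Added sketch (commented out): the truncation exercised by the slow test above,
# in practice — decoding stops before the first matched pattern:
# tok = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
# ids = tok.encode("x = 1\n\n\nprint(x)")
# tok.decode(ids, truncate_before_pattern=["\n\n\n"])  # should yield "x = 1"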
| 641
|
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """
    Implements a batch, differentiable, standard pinhole camera.
    """

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        """
        :return: integer (x, y) pixel coordinates of shape (height * width, 2)
        """
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)

        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)

        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        # map pixel coordinates into [-1, 1], then onto the image plane
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """
        Creates a new camera for the resized view, assuming the aspect ratio does not change.
        """
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
        )


def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
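# Added usage check (illustrative): twenty cameras panning around the origin at
# radius 4; each pixel yields an (origin, direction) pair, so for size=64 the
# ray tensor has shape [1, 20 * 64 * 64, 2, 3].
if __name__ == "__main__":
    cameras = create_pan_cameras(64)
    assert cameras.camera_rays.shape == (1, 20 * 64 * 64, 2, 3)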
| 641
| 1
|
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
__UpperCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__UpperCamelCase : Optional[int] = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 641
|
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
__UpperCamelCase : Dict = logging.getLogger(__name__)
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Optional[int] = 'token-classification'
def __init__( self : Any , __snake_case : Optional[Any] ):
'''simple docstring'''
if type(__snake_case ) == dict:
UpperCAmelCase_ : Tuple = Namespace(**__snake_case )
UpperCAmelCase_ : Dict = import_module('''tasks''' )
try:
UpperCAmelCase_ : int = getattr(__snake_case , hparams.task_type )
UpperCAmelCase_ : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f'''Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
UpperCAmelCase_ : int = self.token_classification_task.get_labels(hparams.labels )
UpperCAmelCase_ : Dict = CrossEntropyLoss().ignore_index
super().__init__(__snake_case , len(self.labels ) , self.mode )
def _lowerCamelCase ( self : Optional[int] , **__snake_case : Optional[Any] ):
'''simple docstring'''
return self.model(**__snake_case )
def _lowerCamelCase ( self : str , __snake_case : Union[str, Any] , __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : int = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
UpperCAmelCase_ : Union[str, Any] = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
            )  # XLM and RoBERTa don't use token_type_ids
UpperCAmelCase_ : str = self(**__snake_case )
UpperCAmelCase_ : Any = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = self.hparams
for mode in ["train", "dev", "test"]:
UpperCAmelCase_ : Optional[Any] = self._feature_file(__snake_case )
if os.path.exists(__snake_case ) and not args.overwrite_cache:
logger.info('''Loading features from cached file %s''' , __snake_case )
UpperCAmelCase_ : Any = torch.load(__snake_case )
else:
logger.info('''Creating features from dataset file at %s''' , args.data_dir )
UpperCAmelCase_ : Optional[Any] = self.token_classification_task.read_examples_from_file(args.data_dir , __snake_case )
UpperCAmelCase_ : List[str] = self.token_classification_task.convert_examples_to_features(
__snake_case , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ['''xlnet'''] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ['''xlnet'''] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=__snake_case , pad_on_left=bool(self.config.model_type in ['''xlnet'''] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info('''Saving features into cached file %s''' , __snake_case )
torch.save(__snake_case , __snake_case )
def _lowerCamelCase ( self : Any , __snake_case : int , __snake_case : int , __snake_case : bool = False ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = self._feature_file(__snake_case )
logger.info('''Loading features from cached file %s''' , __snake_case )
UpperCAmelCase_ : Optional[int] = torch.load(__snake_case )
UpperCAmelCase_ : Any = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
UpperCAmelCase_ : int = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
UpperCAmelCase_ : Any = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
UpperCAmelCase_ : Optional[int] = torch.tensor([0 for f in features] , dtype=torch.long )
# HACK (this will not be used for much longer)
UpperCAmelCase_ : int = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(__snake_case , __snake_case , __snake_case , __snake_case ) , batch_size=__snake_case )
def _lowerCamelCase ( self : List[Any] , __snake_case : Dict , __snake_case : Union[str, Any] ):
'''simple docstring'''
"""Compute validation""" ""
UpperCAmelCase_ : str = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
UpperCAmelCase_ : Any = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
) # XLM and RoBERTa don't use token_type_ids
UpperCAmelCase_ : int = self(**__snake_case )
UpperCAmelCase_ , UpperCAmelCase_ : int = outputs[:2]
UpperCAmelCase_ : Optional[int] = logits.detach().cpu().numpy()
UpperCAmelCase_ : List[Any] = inputs['''labels'''].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _lowerCamelCase ( self : List[str] , __snake_case : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = torch.stack([x['''val_loss'''] for x in outputs] ).mean()
UpperCAmelCase_ : Dict = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )
UpperCAmelCase_ : Any = np.argmax(__snake_case , axis=2 )
UpperCAmelCase_ : int = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
UpperCAmelCase_ : Tuple = dict(enumerate(self.labels ) )
UpperCAmelCase_ : Union[str, Any] = [[] for _ in range(out_label_ids.shape[0] )]
UpperCAmelCase_ : Optional[int] = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
UpperCAmelCase_ : Union[str, Any] = {
'''val_loss''': val_loss_mean,
'''accuracy_score''': accuracy_score(__snake_case , __snake_case ),
'''precision''': precision_score(__snake_case , __snake_case ),
'''recall''': recall_score(__snake_case , __snake_case ),
'''f1''': fa_score(__snake_case , __snake_case ),
}
UpperCAmelCase_ : str = dict(results.items() )
UpperCAmelCase_ : List[Any] = results
return ret, preds_list, out_label_list
def _lowerCamelCase ( self : List[str] , __snake_case : int ):
'''simple docstring'''
# TODO: switch to validation_epoch_end instead of the deprecated validation_end, when stable
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = self._eval_end(__snake_case )
UpperCAmelCase_ : int = ret['''log''']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _lowerCamelCase ( self : List[Any] , __snake_case : Tuple ):
'''simple docstring'''
# updating to test_epoch_end instead of deprecated test_end
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self._eval_end(__snake_case )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
UpperCAmelCase_ : Optional[Any] = ret['''log''']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _lowerCamelCase ( __snake_case : str , __snake_case : Optional[int] ):
'''simple docstring'''
# Add NER specific options
BaseTransformer.add_model_specific_args(__snake_case , __snake_case )
parser.add_argument(
'''--task_type''' , default='''NER''' , type=__snake_case , help='''Task type to fine tune in training (e.g. NER, POS, etc)''' )
parser.add_argument(
'''--max_seq_length''' , default=128 , type=__snake_case , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--labels''' , default='''''' , type=__snake_case , help='''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.''' , )
parser.add_argument(
'''--gpus''' , default=0 , type=__snake_case , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
return parser
if __name__ == "__main__":
__UpperCamelCase : str = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
__UpperCamelCase : Dict = NERTransformer.add_model_specific_args(parser, os.getcwd())
__UpperCamelCase : Tuple = parser.parse_args()
__UpperCamelCase : Optional[Any] = NERTransformer(args)
__UpperCamelCase : int = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
__UpperCamelCase : Optional[int] = sorted(glob.glob(os.path.join(args.output_dir, 'checkpoint-epoch=*.ckpt'), recursive=True))
__UpperCamelCase : List[Any] = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
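# Illustrative usage sketch, assuming this script is saved as run_ner.py and
# the data directory holds CoNLL-formatted train/dev/test files. The NER flags
# are the ones registered above; --model_name_or_path, --output_dir and
# --do_predict are assumed to come from add_generic_args / BaseTransformer:
#
#   python run_ner.py \
#       --data_dir ./conll2003 \
#       --labels ./labels.txt \
#       --model_name_or_path bert-base-cased \
#       --output_dir ./ner-output \
#       --max_seq_length 128 \
#       --task_type NER \
#       --gpus 1 \
#       --do_predict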
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : str = os.path.join(args.tf_model_dir , '''parameters.json''' )
UpperCAmelCase_ : List[str] = json.loads(open(__lowercase ).read() )
if not params:
raise ValueError(
F'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' )
if not args.output.endswith('''.pt''' ):
UpperCAmelCase_ : int = args.output + '''.pt'''
UpperCAmelCase_ : List[str] = OrderedDict()
with tf.device('''/CPU:0''' ):
UpperCAmelCase_ : Dict = tf.train.load_checkpoint(args.tf_model_dir )
UpperCAmelCase_ : Dict = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
UpperCAmelCase_ : Dict = reader.get_tensor(__lowercase ).astype(np.floataa )
if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ):
continue
if key_name.startswith('''pasts/''' ):
if key_name.startswith('''pasts/mlp''' ):
UpperCAmelCase_ : Union[str, Any] = int(key_name[9] )
elif key_name.startswith('''pasts/out''' ):
UpperCAmelCase_ : Union[str, Any] = 8
UpperCAmelCase_ : Optional[Any] = '''model.sqout.%d.weight''' % (player * 2) # feeds into an nn.Sequential with Tanh, so indices advance two at a time
UpperCAmelCase_ : str = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
UpperCAmelCase_ : Dict = torch.tensor(__lowercase )
elif key_name.startswith('''model/moe''' ):
UpperCAmelCase_ : List[str] = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/switch_gating/kernel''' ):
UpperCAmelCase_ : str = '''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player
UpperCAmelCase_ : Union[str, Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
UpperCAmelCase_ : List[str] = torch.tensor(__lowercase )
elif key_name.endswith('''/softmlp/kernel''' ):
UpperCAmelCase_ : List[Any] = '''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player
UpperCAmelCase_ : Optional[int] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
UpperCAmelCase_ : Optional[Any] = torch.tensor(__lowercase )
elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ):
UpperCAmelCase_ : List[Any] = key_name[-9:-7]
for i in range(1_6 ):
UpperCAmelCase_ : List[str] = '''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer)
UpperCAmelCase_ : Optional[Any] = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-TensorFlow this is a single array, so it is split per expert
UpperCAmelCase_ : str = torch.tensor(__lowercase )
elif key_name.startswith('''model/mlp''' ):
UpperCAmelCase_ : Optional[Any] = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/p1/kernel''' ):
UpperCAmelCase_ : Optional[Any] = '''model.blocks.%d.feed_forward.mlp.wi.weight''' % player
UpperCAmelCase_ : Optional[int] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
UpperCAmelCase_ : Optional[int] = torch.tensor(__lowercase )
elif key_name.endswith('''/p1/bias''' ):
UpperCAmelCase_ : Union[str, Any] = '''model.blocks.%d.feed_forward.mlp.wi.bias''' % player
UpperCAmelCase_ : Dict = vnp.copy() # same because it is one dimensional
UpperCAmelCase_ : Optional[int] = torch.tensor(__lowercase )
elif key_name.endswith('''/p2/kernel''' ):
UpperCAmelCase_ : str = '''model.blocks.%d.feed_forward.mlp.wo.weight''' % player
UpperCAmelCase_ : Optional[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
UpperCAmelCase_ : str = torch.tensor(__lowercase )
elif key_name.endswith('''/p2/bias''' ):
UpperCAmelCase_ : List[Any] = '''model.blocks.%d.feed_forward.mlp.wo.bias''' % player
UpperCAmelCase_ : List[Any] = vnp.copy() # same because it is one dimensional
UpperCAmelCase_ : int = torch.tensor(__lowercase )
elif key_name.startswith('''model/ln''' ):
UpperCAmelCase_ : List[Any] = int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
UpperCAmelCase_ : Any = '''model.blocks.%d.feed_forward.norm.bias''' % player
UpperCAmelCase_ : Union[str, Any] = vnp.copy() # same because it is one dimensional
UpperCAmelCase_ : int = torch.tensor(__lowercase )
elif key_name.endswith('''/g''' ):
UpperCAmelCase_ : Optional[Any] = '''model.blocks.%d.feed_forward.norm.weight''' % player
UpperCAmelCase_ : Union[str, Any] = vnp.copy() # same because it is one dimensional
UpperCAmelCase_ : Dict = torch.tensor(__lowercase )
elif key_name.startswith('''model/att''' ):
UpperCAmelCase_ : Tuple = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/qkv/kernel''' ):
UpperCAmelCase_ : Optional[int] = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
UpperCAmelCase_ : int = state[:, 0, :, :]
UpperCAmelCase_ : List[str] = state[:, 1, :, :]
UpperCAmelCase_ : int = state[:, 2, :, :]
UpperCAmelCase_ : List[str] = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
UpperCAmelCase_ : Any = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
UpperCAmelCase_ : Tuple = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
UpperCAmelCase_ : Union[str, Any] = '''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player
UpperCAmelCase_ : str = torch.tensor(__lowercase )
UpperCAmelCase_ : int = '''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player
UpperCAmelCase_ : List[Any] = torch.tensor(__lowercase )
UpperCAmelCase_ : int = '''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player
UpperCAmelCase_ : Tuple = torch.tensor(__lowercase )
elif key_name.endswith('''/o/kernel''' ):
UpperCAmelCase_ : int = '''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player
UpperCAmelCase_ : Optional[Any] = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
UpperCAmelCase_ : Tuple = torch.tensor(__lowercase )
elif key_name.startswith('''model/an''' ):
UpperCAmelCase_ : str = int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
UpperCAmelCase_ : List[str] = '''model.blocks.%d.self_attn.norm.bias''' % player
UpperCAmelCase_ : str = vnp.copy() # same because it is one dimensional
UpperCAmelCase_ : List[Any] = torch.tensor(__lowercase )
elif key_name.endswith('''/g''' ):
UpperCAmelCase_ : Optional[Any] = '''model.blocks.%d.self_attn.norm.weight''' % player
UpperCAmelCase_ : Optional[Any] = vnp.copy() # same because it is one dimensional
UpperCAmelCase_ : Dict = torch.tensor(__lowercase )
elif (
key_name.startswith('''model/wte''' )
or key_name.startswith('''model/wpe''' )
or key_name.startswith('''model/ete''' )
):
UpperCAmelCase_ : str = {'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[
key_name[-3:]
]
UpperCAmelCase_ : Optional[int] = '''model.%s.weight''' % nlayer
UpperCAmelCase_ : List[str] = vnp.copy() # same in embedded
UpperCAmelCase_ : Any = torch.tensor(__lowercase )
if key_name.startswith('''model/wte''' ):
UpperCAmelCase_ : Union[str, Any] = '''lm_head.weight'''
UpperCAmelCase_ : Dict = vnp.copy() # same in embedded
UpperCAmelCase_ : Union[str, Any] = torch.tensor(__lowercase )
elif key_name.startswith('''model/wob''' ):
UpperCAmelCase_ : int = '''final_logits_bias'''
UpperCAmelCase_ : Dict = vnp.copy() # same in embedded
UpperCAmelCase_ : List[Any] = state.reshape((1, -1) )
UpperCAmelCase_ : str = torch.tensor(__lowercase )
elif key_name == "model/dense/kernel":
UpperCAmelCase_ : str = '''model.last_project.weight'''
UpperCAmelCase_ : Dict = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
UpperCAmelCase_ : Union[str, Any] = torch.tensor(__lowercase )
elif key_name == "model/dense_1/bias":
UpperCAmelCase_ : Tuple = '''model.last_project.bias'''
UpperCAmelCase_ : List[Any] = vnp.copy() # same because it is one dimensional
UpperCAmelCase_ : Union[str, Any] = torch.tensor(__lowercase )
torch.save(__lowercase , args.output )
if __name__ == "__main__":
__UpperCamelCase : Any = argparse.ArgumentParser(
description='model converter.', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--tf_model_dir', metavar='PATH', type=str, required=True, help='import model')
parser.add_argument('--output', metavar='PATH', type=str, required=True, help='output model')
__UpperCamelCase : Optional[Any] = parser.parse_args()
convert_tf_gptsan_to_pt(args)
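# Illustrative usage sketch (script name and paths are assumptions; the two
# flags are exactly the ones registered on the parser above):
#
#   python convert_gptsan_tf_to_pytorch.py \
#       --tf_model_dir ./gptsan_tf_checkpoint \
#       --output ./gptsan_pytorch_model.pt
#
# The checkpoint directory must contain parameters.json next to the
# TensorFlow checkpoint shards, as required by the loader above.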
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Tuple = logging.get_logger(__name__)
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Union[str, Any] = 'encoder-decoder'
A_ : Optional[int] = True
def __init__( self : Dict , **__snake_case : Union[str, Any] ):
'''simple docstring'''
super().__init__(**__snake_case )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
UpperCAmelCase_ : int = kwargs.pop('''encoder''' )
UpperCAmelCase_ : List[Any] = encoder_config.pop('''model_type''' )
UpperCAmelCase_ : int = kwargs.pop('''decoder''' )
UpperCAmelCase_ : Union[str, Any] = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
UpperCAmelCase_ : Optional[int] = AutoConfig.for_model(__snake_case , **__snake_case )
UpperCAmelCase_ : Any = AutoConfig.for_model(__snake_case , **__snake_case )
UpperCAmelCase_ : List[Any] = True
@classmethod
def _lowerCamelCase ( cls : Optional[Any] , __snake_case : PretrainedConfig , __snake_case : PretrainedConfig , **__snake_case : Union[str, Any] ):
'''simple docstring'''
logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
UpperCAmelCase_ : Dict = True
UpperCAmelCase_ : str = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__snake_case )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : int = copy.deepcopy(self.__dict__ )
UpperCAmelCase_ : Tuple = self.encoder.to_dict()
UpperCAmelCase_ : Tuple = self.decoder.to_dict()
UpperCAmelCase_ : Tuple = self.__class__.model_type
return output
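# Illustrative usage sketch. In the public transformers API this class is
# EncoderDecoderConfig and the classmethod above is from_encoder_decoder_configs;
# those names are assumed here:
#
#   from transformers import BertConfig, EncoderDecoderConfig, GPT2Config
#
#   encoder_config = BertConfig()
#   decoder_config = GPT2Config()
#   config = EncoderDecoderConfig.from_encoder_decoder_configs(
#       encoder_config, decoder_config
#   )
#   assert config.decoder.is_decoder and config.decoder.add_cross_attention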
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowerCAmelCase__( snake_case__ , unittest.TestCase ):
'''simple docstring'''
A_ : str = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
def _lowerCamelCase ( self : List[Any] , __snake_case : Tuple=0 ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = floats_tensor((1, 3, 128, 128) , rng=random.Random(__snake_case ) )
UpperCAmelCase_ : Dict = np.random.RandomState(__snake_case )
UpperCAmelCase_ : List[Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''strength''': 0.75,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=__snake_case )
UpperCAmelCase_ : List[str] = self.get_dummy_inputs()
UpperCAmelCase_ : List[Any] = pipe(**__snake_case ).images
UpperCAmelCase_ : Any = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
UpperCAmelCase_ : List[str] = np.array([0.69_643, 0.58_484, 0.50_314, 0.58_760, 0.55_368, 0.59_643, 0.51_529, 0.41_217, 0.49_087] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ : Any = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
UpperCAmelCase_ : Optional[Any] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
UpperCAmelCase_ : Optional[Any] = self.get_dummy_inputs()
UpperCAmelCase_ : List[Any] = pipe(**__snake_case ).images
UpperCAmelCase_ : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCAmelCase_ : Union[str, Any] = np.array([0.61_737, 0.54_642, 0.53_183, 0.54_465, 0.52_742, 0.60_525, 0.49_969, 0.40_655, 0.48_154] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def _lowerCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase_ : Any = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
UpperCAmelCase_ : Dict = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__snake_case )
# warmup pass to apply optimizations
UpperCAmelCase_ : Dict = pipe(**self.get_dummy_inputs() )
UpperCAmelCase_ : Any = self.get_dummy_inputs()
UpperCAmelCase_ : int = pipe(**__snake_case ).images
UpperCAmelCase_ : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCAmelCase_ : List[Any] = np.array([0.52_761, 0.59_977, 0.49_033, 0.49_619, 0.54_282, 0.50_311, 0.47_600, 0.40_918, 0.45_203] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
UpperCAmelCase_ : Union[str, Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__snake_case )
UpperCAmelCase_ : Any = self.get_dummy_inputs()
UpperCAmelCase_ : str = pipe(**__snake_case ).images
UpperCAmelCase_ : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCAmelCase_ : Tuple = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
UpperCAmelCase_ : Union[str, Any] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__snake_case )
UpperCAmelCase_ : Any = self.get_dummy_inputs()
UpperCAmelCase_ : List[str] = pipe(**__snake_case ).images
UpperCAmelCase_ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCAmelCase_ : Any = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Dict = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
UpperCAmelCase_ : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__snake_case )
UpperCAmelCase_ : Tuple = self.get_dummy_inputs()
UpperCAmelCase_ : List[str] = pipe(**__snake_case ).images
UpperCAmelCase_ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCAmelCase_ : Optional[Any] = np.array([0.65_331, 0.58_277, 0.48_204, 0.56_059, 0.53_665, 0.56_235, 0.50_969, 0.40_009, 0.46_552] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@property
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Dict = ort.SessionOptions()
UpperCAmelCase_ : Tuple = False
return options
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
UpperCAmelCase_ : str = init_image.resize((768, 512) )
# using the PNDM scheduler by default
UpperCAmelCase_ : Dict = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=__snake_case , feature_extractor=__snake_case , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__snake_case )
UpperCAmelCase_ : List[Any] = '''A fantasy landscape, trending on artstation'''
UpperCAmelCase_ : str = np.random.RandomState(0 )
UpperCAmelCase_ : int = pipe(
prompt=__snake_case , image=__snake_case , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=__snake_case , output_type='''np''' , )
UpperCAmelCase_ : Tuple = output.images
UpperCAmelCase_ : Optional[int] = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
UpperCAmelCase_ : Any = np.array([0.4_909, 0.5_059, 0.5_372, 0.4_623, 0.4_876, 0.5_049, 0.4_820, 0.4_956, 0.5_019] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def _lowerCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase_ : str = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
UpperCAmelCase_ : Dict = init_image.resize((768, 512) )
UpperCAmelCase_ : Tuple = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' )
UpperCAmelCase_ : Union[str, Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=__snake_case , safety_checker=__snake_case , feature_extractor=__snake_case , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__snake_case )
UpperCAmelCase_ : Optional[Any] = '''A fantasy landscape, trending on artstation'''
UpperCAmelCase_ : Dict = np.random.RandomState(0 )
UpperCAmelCase_ : Any = pipe(
prompt=__snake_case , image=__snake_case , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=__snake_case , output_type='''np''' , )
UpperCAmelCase_ : Optional[Any] = output.images
UpperCAmelCase_ : int = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
UpperCAmelCase_ : Optional[Any] = np.array([0.8_043, 0.926, 0.9_581, 0.8_119, 0.8_954, 0.913, 0.7_209, 0.7_463, 0.7_431] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
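# Illustrative usage sketch outside the test harness, mirroring the nightly
# test above; the public class name OnnxStableDiffusionImg2ImgPipeline and the
# checkpoint choice are assumptions:
#
#   import numpy as np
#   from diffusers import OnnxStableDiffusionImg2ImgPipeline
#   from diffusers.utils import load_image
#
#   pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
#       "runwayml/stable-diffusion-v1-5", revision="onnx",
#       provider="CPUExecutionProvider",
#   )
#   init_image = load_image(
#       "https://huggingface.co/datasets/hf-internal-testing/diffusers-images"
#       "/resolve/main/img2img/sketch-mountains-input.jpg"
#   ).resize((768, 512))
#   out = pipe(
#       prompt="A fantasy landscape, trending on artstation",
#       image=init_image, strength=0.75, guidance_scale=7.5,
#       num_inference_steps=20, generator=np.random.RandomState(0),
#       output_type="np",
#   )
#   print(out.images.shape)  # (1, 512, 768, 3), as asserted in the tests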
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def snake_case_ ( ):
UpperCAmelCase_ : int = '''https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'''
UpperCAmelCase_ : Dict = Image.open(requests.get(__lowercase , stream=__lowercase ).raw ).convert('''RGB''' )
return image
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : List[str] = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : Any = dct.pop(__lowercase )
UpperCAmelCase_ : Optional[Any] = val
def snake_case_ ( __lowercase , __lowercase ):
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
UpperCAmelCase_ : Optional[int] = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' )
UpperCAmelCase_ : Any = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
UpperCAmelCase_ : int = torch.cat((q_bias, torch.zeros_like(__lowercase , requires_grad=__lowercase ), v_bias) )
UpperCAmelCase_ : List[str] = qkv_bias
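# Illustrative note: the fused qkv bias is rebuilt as [q_bias, zeros, v_bias]
# because the original vision attention carries no bias on the key projection.
# A standalone demonstration of the same concatenation, with assumed toy shapes:
#
#   import torch
#
#   q_bias = torch.ones(4)
#   v_bias = torch.full((4,), 2.0)
#   qkv_bias = torch.cat((q_bias, torch.zeros_like(q_bias), v_bias))
#   assert qkv_bias.shape == (12,)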
def snake_case_ ( __lowercase , __lowercase ):
UpperCAmelCase_ : Optional[Any] = 3_6_4 if '''coco''' in model_name else 2_2_4
UpperCAmelCase_ : Any = BlipaVisionConfig(image_size=__lowercase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
UpperCAmelCase_ : Any = OPTConfig.from_pretrained('''facebook/opt-2.7b''' , eos_token_id=__lowercase ).to_dict()
elif "opt-6.7b" in model_name:
UpperCAmelCase_ : List[str] = OPTConfig.from_pretrained('''facebook/opt-6.7b''' , eos_token_id=__lowercase ).to_dict()
elif "t5-xl" in model_name:
UpperCAmelCase_ : List[str] = TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
UpperCAmelCase_ : Any = TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
UpperCAmelCase_ : List[Any] = BlipaConfig(vision_config=__lowercase , text_config=__lowercase )
return config, image_size
@torch.no_grad()
def snake_case_ ( __lowercase , __lowercase=None , __lowercase=False ):
UpperCAmelCase_ : List[Any] = (
AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' )
if '''opt''' in model_name
else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' )
)
UpperCAmelCase_ : List[str] = tokenizer('''\n''' , add_special_tokens=__lowercase ).input_ids[0]
UpperCAmelCase_ , UpperCAmelCase_ : str = get_blipa_config(__lowercase , eos_token_id=__lowercase )
UpperCAmelCase_ : List[Any] = BlipaForConditionalGeneration(__lowercase ).eval()
UpperCAmelCase_ : Tuple = {
'''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''),
'''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''),
'''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''),
'''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''),
'''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''),
'''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''),
'''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''),
}
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = model_name_to_original[model_name]
# load original model
print('''Loading original model...''' )
UpperCAmelCase_ : int = '''cuda''' if torch.cuda.is_available() else '''cpu'''
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = load_model_and_preprocess(
name=__lowercase , model_type=__lowercase , is_eval=__lowercase , device=__lowercase )
original_model.eval()
print('''Done!''' )
# update state dict keys
UpperCAmelCase_ : Optional[Any] = original_model.state_dict()
UpperCAmelCase_ : List[Any] = create_rename_keys(__lowercase )
for src, dest in rename_keys:
rename_key(__lowercase , __lowercase , __lowercase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
UpperCAmelCase_ : Union[str, Any] = state_dict.pop(__lowercase )
if key.startswith('''Qformer.bert''' ):
UpperCAmelCase_ : Tuple = key.replace('''Qformer.bert''' , '''qformer''' )
if "attention.self" in key:
UpperCAmelCase_ : Optional[Any] = key.replace('''self''' , '''attention''' )
if "opt_proj" in key:
UpperCAmelCase_ : Any = key.replace('''opt_proj''' , '''language_projection''' )
if "t5_proj" in key:
UpperCAmelCase_ : Tuple = key.replace('''t5_proj''' , '''language_projection''' )
if key.startswith('''opt''' ):
UpperCAmelCase_ : Any = key.replace('''opt''' , '''language''' )
if key.startswith('''t5''' ):
UpperCAmelCase_ : Optional[Any] = key.replace('''t5''' , '''language''' )
UpperCAmelCase_ : List[str] = val
# read in qv biases
read_in_q_v_bias(__lowercase , __lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = hf_model.load_state_dict(__lowercase , strict=__lowercase )
assert len(__lowercase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
UpperCAmelCase_ : str = load_demo_image()
UpperCAmelCase_ : Any = vis_processors['''eval'''](__lowercase ).unsqueeze(0 ).to(__lowercase )
UpperCAmelCase_ : Optional[Any] = tokenizer(['''\n'''] , return_tensors='''pt''' ).input_ids.to(__lowercase )
# create processor
UpperCAmelCase_ : Optional[int] = BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size} , image_mean=__lowercase , image_std=__lowercase )
UpperCAmelCase_ : Tuple = BlipaProcessor(image_processor=__lowercase , tokenizer=__lowercase )
UpperCAmelCase_ : str = processor(images=__lowercase , return_tensors='''pt''' ).pixel_values.to(__lowercase )
# make sure processor creates exact same pixel values
assert torch.allclose(__lowercase , __lowercase )
original_model.to(__lowercase )
hf_model.to(__lowercase )
with torch.no_grad():
if "opt" in model_name:
UpperCAmelCase_ : Tuple = original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits
UpperCAmelCase_ : Optional[int] = hf_model(__lowercase , __lowercase ).logits
else:
UpperCAmelCase_ : int = original_model(
{'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits
UpperCAmelCase_ : Optional[int] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_0_0 )
UpperCAmelCase_ : int = hf_model(__lowercase , __lowercase , labels=__lowercase ).logits
assert original_logits.shape == logits.shape
print('''First values of original logits:''' , original_logits[0, :3, :3] )
print('''First values of HF logits:''' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
UpperCAmelCase_ : Tuple = torch.tensor(
[[-4_1.5_8_5_0, -4.4_4_4_0, -8.9_9_2_2], [-4_7.4_3_2_2, -5.9_1_4_3, -1.7_3_4_0]] , device=__lowercase )
assert torch.allclose(logits[0, :3, :3] , __lowercase , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
UpperCAmelCase_ : Tuple = torch.tensor(
[[-5_7.0_1_0_9, -9.8_9_6_7, -1_2.6_2_8_0], [-6_8.6_5_7_8, -1_2.7_1_9_1, -1_0.5_0_6_5]] , device=__lowercase )
else:
# cast to same type
UpperCAmelCase_ : Optional[int] = logits.dtype
assert torch.allclose(original_logits.to(__lowercase ) , __lowercase , atol=1e-2 )
print('''Looks ok!''' )
print('''Generating a caption...''' )
UpperCAmelCase_ : Union[str, Any] = ''''''
UpperCAmelCase_ : Optional[Any] = tokenizer(__lowercase , return_tensors='''pt''' ).input_ids.to(__lowercase )
UpperCAmelCase_ : int = original_model.generate({'''image''': original_pixel_values} )
UpperCAmelCase_ : Optional[int] = hf_model.generate(
__lowercase , __lowercase , do_sample=__lowercase , num_beams=5 , max_length=3_0 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('''Original generation:''' , __lowercase )
UpperCAmelCase_ : Tuple = input_ids.shape[1]
UpperCAmelCase_ : str = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=__lowercase )
UpperCAmelCase_ : Optional[int] = [text.strip() for text in output_text]
print('''HF generation:''' , __lowercase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(__lowercase )
hf_model.save_pretrained(__lowercase )
if push_to_hub:
processor.push_to_hub(F'''nielsr/{model_name}''' )
hf_model.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
__UpperCamelCase : List[Any] = argparse.ArgumentParser()
__UpperCamelCase : Optional[Any] = [
'blip2-opt-2.7b',
'blip2-opt-6.7b',
'blip2-opt-2.7b-coco',
'blip2-opt-6.7b-coco',
'blip2-flan-t5-xl',
'blip2-flan-t5-xl-coco',
'blip2-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='blip2-opt-2.7b',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
__UpperCamelCase : int = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
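# Illustrative usage sketch (the script file name is an assumption; the three
# flags are the ones registered on the parser above, and the patched LAVIS
# install noted at the top of the file is required):
#
#   python convert_blip_2_original_to_pytorch.py \
#       --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./blip2-opt-2.7b \
#       --push_to_hub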
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
__UpperCamelCase : Tuple = logging.get_logger(__name__)
# General docstring
__UpperCamelCase : List[str] = 'RegNetConfig'
# Base docstring
__UpperCamelCase : Dict = 'facebook/regnet-y-040'
__UpperCamelCase : int = [1, 1088, 7, 7]
# Image classification docstring
__UpperCamelCase : Optional[Any] = 'facebook/regnet-y-040'
__UpperCamelCase : Optional[int] = 'tabby, tabby cat'
__UpperCamelCase : List[Any] = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self : Any , __snake_case : int , __snake_case : int , __snake_case : int = 3 , __snake_case : int = 1 , __snake_case : int = 1 , __snake_case : Optional[str] = "relu" , ):
'''simple docstring'''
super().__init__()
UpperCAmelCase_ : Union[str, Any] = nn.Convad(
__snake_case , __snake_case , kernel_size=__snake_case , stride=__snake_case , padding=kernel_size // 2 , groups=__snake_case , bias=__snake_case , )
UpperCAmelCase_ : Union[str, Any] = nn.BatchNormad(__snake_case )
UpperCAmelCase_ : List[str] = ACTaFN[activation] if activation is not None else nn.Identity()
def _lowerCamelCase ( self : Optional[Any] , __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = self.convolution(__snake_case )
UpperCAmelCase_ : int = self.normalization(__snake_case )
UpperCAmelCase_ : Optional[Any] = self.activation(__snake_case )
return hidden_state
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] , __snake_case : RegNetConfig ):
'''simple docstring'''
super().__init__()
UpperCAmelCase_ : Dict = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
UpperCAmelCase_ : Optional[Any] = config.num_channels
def _lowerCamelCase ( self : List[Any] , __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : str = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
UpperCAmelCase_ : Tuple = self.embedder(__snake_case )
return hidden_state
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , __snake_case : int , __snake_case : int , __snake_case : int = 2 ):
'''simple docstring'''
super().__init__()
UpperCAmelCase_ : int = nn.Convad(__snake_case , __snake_case , kernel_size=1 , stride=__snake_case , bias=__snake_case )
UpperCAmelCase_ : List[Any] = nn.BatchNormad(__snake_case )
def _lowerCamelCase ( self : Optional[int] , __snake_case : Tensor ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = self.convolution(__snake_case )
UpperCAmelCase_ : Optional[Any] = self.normalization(__snake_case )
return hidden_state
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] , __snake_case : int , __snake_case : int ):
'''simple docstring'''
super().__init__()
UpperCAmelCase_ : Any = nn.AdaptiveAvgPoolad((1, 1) )
UpperCAmelCase_ : Any = nn.Sequential(
nn.Convad(__snake_case , __snake_case , kernel_size=1 ) , nn.ReLU() , nn.Convad(__snake_case , __snake_case , kernel_size=1 ) , nn.Sigmoid() , )
def _lowerCamelCase ( self : Dict , __snake_case : Union[str, Any] ):
'''simple docstring'''
# b c h w -> b c 1 1
UpperCAmelCase_ : List[str] = self.pooler(__snake_case )
UpperCAmelCase_ : List[Any] = self.attention(__snake_case )
UpperCAmelCase_ : Optional[int] = hidden_state * attention
return hidden_state
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , __snake_case : RegNetConfig , __snake_case : int , __snake_case : int , __snake_case : int = 1 ):
'''simple docstring'''
super().__init__()
UpperCAmelCase_ : str = in_channels != out_channels or stride != 1
UpperCAmelCase_ : List[str] = max(1 , out_channels // config.groups_width )
UpperCAmelCase_ : Dict = (
RegNetShortCut(__snake_case , __snake_case , stride=__snake_case ) if should_apply_shortcut else nn.Identity()
)
UpperCAmelCase_ : Optional[Any] = nn.Sequential(
RegNetConvLayer(__snake_case , __snake_case , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(__snake_case , __snake_case , stride=__snake_case , groups=__snake_case , activation=config.hidden_act ) , RegNetConvLayer(__snake_case , __snake_case , kernel_size=1 , activation=__snake_case ) , )
UpperCAmelCase_ : int = ACTaFN[config.hidden_act]
def _lowerCamelCase ( self : Optional[Any] , __snake_case : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = hidden_state
UpperCAmelCase_ : Union[str, Any] = self.layer(__snake_case )
UpperCAmelCase_ : Union[str, Any] = self.shortcut(__snake_case )
hidden_state += residual
UpperCAmelCase_ : List[Any] = self.activation(__snake_case )
return hidden_state
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] , __snake_case : RegNetConfig , __snake_case : int , __snake_case : int , __snake_case : int = 1 ):
'''simple docstring'''
super().__init__()
UpperCAmelCase_ : Any = in_channels != out_channels or stride != 1
UpperCAmelCase_ : Optional[Any] = max(1 , out_channels // config.groups_width )
UpperCAmelCase_ : Tuple = (
RegNetShortCut(__snake_case , __snake_case , stride=__snake_case ) if should_apply_shortcut else nn.Identity()
)
UpperCAmelCase_ : int = nn.Sequential(
RegNetConvLayer(__snake_case , __snake_case , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(__snake_case , __snake_case , stride=__snake_case , groups=__snake_case , activation=config.hidden_act ) , RegNetSELayer(__snake_case , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(__snake_case , __snake_case , kernel_size=1 , activation=__snake_case ) , )
UpperCAmelCase_ : Optional[Any] = ACTaFN[config.hidden_act]
def _lowerCamelCase ( self : Union[str, Any] , __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Any = hidden_state
UpperCAmelCase_ : Optional[Any] = self.layer(__snake_case )
UpperCAmelCase_ : Union[str, Any] = self.shortcut(__snake_case )
hidden_state += residual
UpperCAmelCase_ : Tuple = self.activation(__snake_case )
return hidden_state
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self : Any , __snake_case : RegNetConfig , __snake_case : int , __snake_case : int , __snake_case : int = 2 , __snake_case : int = 2 , ):
'''simple docstring'''
super().__init__()
UpperCAmelCase_ : Union[str, Any] = RegNetXLayer if config.layer_type == '''x''' else RegNetYLayer
UpperCAmelCase_ : List[Any] = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
__snake_case , __snake_case , __snake_case , stride=__snake_case , ) , *[layer(__snake_case , __snake_case , __snake_case ) for _ in range(depth - 1 )] , )
def _lowerCamelCase ( self : List[str] , __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = self.layers(__snake_case )
return hidden_state
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] , __snake_case : RegNetConfig ):
'''simple docstring'''
super().__init__()
UpperCAmelCase_ : Optional[Any] = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
__snake_case , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
UpperCAmelCase_ : Optional[int] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(__snake_case , config.depths[1:] ):
self.stages.append(RegNetStage(__snake_case , __snake_case , __snake_case , depth=__snake_case ) )
def _lowerCamelCase ( self : List[str] , __snake_case : Tensor , __snake_case : bool = False , __snake_case : bool = True ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
UpperCAmelCase_ : Union[str, Any] = hidden_states + (hidden_state,)
UpperCAmelCase_ : str = stage_module(__snake_case )
if output_hidden_states:
UpperCAmelCase_ : Optional[int] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=__snake_case , hidden_states=__snake_case )
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Dict = RegNetConfig
A_ : Dict = 'regnet'
A_ : Union[str, Any] = 'pixel_values'
A_ : Union[str, Any] = True
def _lowerCamelCase ( self : str , __snake_case : Any ):
'''simple docstring'''
if isinstance(__snake_case , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='''fan_out''' , nonlinearity='''relu''' )
elif isinstance(__snake_case , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def _lowerCamelCase ( self : Optional[Any] , __snake_case : Optional[int] , __snake_case : Union[str, Any]=False ):
'''simple docstring'''
if isinstance(__snake_case , __snake_case ):
UpperCAmelCase_ : str = value
__UpperCamelCase : Tuple = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
__UpperCamelCase : Union[str, Any] = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
'The bare RegNet model outputting raw features without any specific head on top.' , snake_case__ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
def __init__( self : List[Any] , __snake_case : Optional[int] ):
'''simple docstring'''
super().__init__(__snake_case )
UpperCAmelCase_ : List[str] = config
UpperCAmelCase_ : List[Any] = RegNetEmbeddings(__snake_case )
UpperCAmelCase_ : Optional[Any] = RegNetEncoder(__snake_case )
UpperCAmelCase_ : Optional[Any] = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__snake_case )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__snake_case , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _lowerCamelCase ( self : List[str] , __snake_case : Tensor , __snake_case : Optional[bool] = None , __snake_case : Optional[bool] = None ):
'''simple docstring'''
UpperCAmelCase_ : Any = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCAmelCase_ : Any = return_dict if return_dict is not None else self.config.use_return_dict
UpperCAmelCase_ : Union[str, Any] = self.embedder(__snake_case )
UpperCAmelCase_ : Optional[int] = self.encoder(
__snake_case , output_hidden_states=__snake_case , return_dict=__snake_case )
UpperCAmelCase_ : List[Any] = encoder_outputs[0]
UpperCAmelCase_ : int = self.pooler(__snake_case )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__snake_case , pooler_output=__snake_case , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , snake_case__ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
def __init__( self : Optional[Any] , __snake_case : List[str] ):
'''simple docstring'''
super().__init__(__snake_case )
UpperCAmelCase_ : Optional[Any] = config.num_labels
UpperCAmelCase_ : Any = RegNetModel(__snake_case )
# classification head
UpperCAmelCase_ : List[str] = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__snake_case )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__snake_case , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _lowerCamelCase ( self : Any , __snake_case : Optional[torch.FloatTensor] = None , __snake_case : Optional[torch.LongTensor] = None , __snake_case : Optional[bool] = None , __snake_case : Optional[bool] = None , ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
UpperCAmelCase_ : Dict = self.regnet(__snake_case , output_hidden_states=__snake_case , return_dict=__snake_case )
UpperCAmelCase_ : Union[str, Any] = outputs.pooler_output if return_dict else outputs[1]
UpperCAmelCase_ : str = self.classifier(__snake_case )
UpperCAmelCase_ : Union[str, Any] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
UpperCAmelCase_ : Dict = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
UpperCAmelCase_ : Any = '''single_label_classification'''
else:
UpperCAmelCase_ : Any = '''multi_label_classification'''
if self.config.problem_type == "regression":
UpperCAmelCase_ : Union[str, Any] = MSELoss()
if self.num_labels == 1:
UpperCAmelCase_ : int = loss_fct(logits.squeeze() , labels.squeeze() )
else:
UpperCAmelCase_ : Union[str, Any] = loss_fct(__snake_case , __snake_case )
elif self.config.problem_type == "single_label_classification":
UpperCAmelCase_ : Union[str, Any] = CrossEntropyLoss()
UpperCAmelCase_ : Union[str, Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
UpperCAmelCase_ : Tuple = BCEWithLogitsLoss()
UpperCAmelCase_ : int = loss_fct(__snake_case , __snake_case )
if not return_dict:
UpperCAmelCase_ : Optional[int] = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=__snake_case , logits=__snake_case , hidden_states=outputs.hidden_states )
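# Illustrative inference sketch. In the public transformers API the
# classification model above is RegNetForImageClassification; those names and
# the checkpoint are assumed from the docstrings above:
#
#   import torch
#   from transformers import AutoImageProcessor, RegNetForImageClassification
#
#   processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#   model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#   inputs = processor(images=image, return_tensors="pt")  # `image`: any PIL image
#   with torch.no_grad():
#       logits = model(**inputs).logits
#   print(model.config.id2label[logits.argmax(-1).item()])  # e.g. "tabby, tabby cat"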
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def snake_case_ ( __lowercase , __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : int = multiprocessing.Manager()
UpperCAmelCase_ : Union[str, Any] = manager.list()
UpperCAmelCase_ : int = multiprocessing.Process(target=__lowercase , args=(check_program, result, timeout) )
p.start()
p.join(timeout=timeout + 1 )
if p.is_alive():
p.kill()
if not result:
result.append('''timed out''' )
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
with create_tempdir():
# These system calls are needed when cleaning up tempdir.
import os
import shutil
UpperCAmelCase_ : str = shutil.rmtree
UpperCAmelCase_ : Tuple = os.rmdir
UpperCAmelCase_ : Dict = os.chdir
# Disable functionalities that can make destructive changes to the test.
reliability_guard()
# Run program.
try:
UpperCAmelCase_ : Optional[int] = {}
with swallow_io():
with time_limit(__lowercase ):
exec(__lowercase , __lowercase )
result.append('''passed''' )
except TimeoutException:
result.append('''timed out''' )
except BaseException as e:
result.append(F'''failed: {e}''' )
# Needed for cleaning up.
UpperCAmelCase_ : Optional[int] = rmtree
UpperCAmelCase_ : Optional[Any] = rmdir
UpperCAmelCase_ : Optional[Any] = chdir
@contextlib.contextmanager
def snake_case_ ( __lowercase ):
def signal_handler(__lowercase , __lowercase ):
raise TimeoutException('''Timed out!''' )
signal.setitimer(signal.ITIMER_REAL , __lowercase )
signal.signal(signal.SIGALRM , __lowercase )
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
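# Illustrative usage sketch for the SIGALRM-based limiter above (POSIX only).
# In the original source the context manager is named time_limit and the
# exception class defined further below is TimeoutException; those names are
# assumed here:
#
#   try:
#       with time_limit(1.0):
#           while True:
#               pass
#   except TimeoutException:
#       print("interrupted after ~1 second")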
@contextlib.contextmanager
def snake_case_ ( ):
UpperCAmelCase_ : Optional[Any] = WriteOnlyStringIO()
with contextlib.redirect_stdout(__lowercase ):
with contextlib.redirect_stderr(__lowercase ):
with redirect_stdin(__lowercase ):
yield
@contextlib.contextmanager
def snake_case_ ( ):
with tempfile.TemporaryDirectory() as dirname:
with chdir(__lowercase ):
yield dirname
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
pass
class lowerCAmelCase__( io.StringIO ):
'''simple docstring'''
def _lowerCamelCase ( self : Dict , *__snake_case : List[Any] , **__snake_case : int ):
'''simple docstring'''
raise OSError
def _lowerCamelCase ( self : Dict , *__snake_case : int , **__snake_case : Any ):
'''simple docstring'''
raise OSError
def _lowerCamelCase ( self : int , *__snake_case : List[str] , **__snake_case : Optional[Any] ):
'''simple docstring'''
raise OSError
def _lowerCamelCase ( self : Union[str, Any] , *__snake_case : Optional[Any] , **__snake_case : List[Any] ):
'''simple docstring'''
return False
class lowerCAmelCase__( contextlib._RedirectStream ): # type: ignore
'''simple docstring'''
A_ : Optional[Any] = 'stdin'
@contextlib.contextmanager
def snake_case_ ( __lowercase ):
if root == ".":
yield
return
UpperCAmelCase_ : Tuple = os.getcwd()
os.chdir(__lowercase )
try:
yield
except BaseException as exc:
raise exc
finally:
os.chdir(__lowercase )
def snake_case_ ( __lowercase=None ):
if maximum_memory_bytes is not None:
import resource
resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
faulthandler.disable()
import builtins
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Any = None
import os
UpperCAmelCase_ : Union[str, Any] = '''1'''
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : str = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : int = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : int = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Any = None
import shutil
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Tuple = None
import subprocess
UpperCAmelCase_ : Dict = None # type: ignore
UpperCAmelCase_ : Union[str, Any] = None
import sys
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : Dict = None
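# The guard above is not a security sandbox: nulling out interpreter and OS
# entry points only makes *accidental* destructive behaviour (deleting files,
# spawning processes, killing the worker) harder. Untrusted generated code
# should still be executed inside an isolated container or VM.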
| 641
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__UpperCamelCase : Any = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Dict = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Dict = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
__UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
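# A sketch of what the lazy pattern above buys (hypothetical usage, assuming
# transformers is installed): _LazyModule resolves each name listed in the
# import structure only on first attribute access, so touching the config
# alone never imports torch.
#
#   from transformers import SqueezeBertConfig  # cheap, no torch import
#   config = SqueezeBertConfig(hidden_size=768)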
| 641
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : str = logging.get_logger(__name__)
__UpperCamelCase : Any = {
'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : int = 'falcon'
A_ : int = ['past_key_values']
def __init__( self : Optional[Any] , __snake_case : Tuple=65_024 , __snake_case : List[str]=4_544 , __snake_case : Optional[Any]=32 , __snake_case : Any=71 , __snake_case : str=1E-5 , __snake_case : List[str]=0.02 , __snake_case : List[Any]=True , __snake_case : Dict=0.0 , __snake_case : Optional[Any]=0.0 , __snake_case : Any=None , __snake_case : List[Any]=False , __snake_case : Dict=False , __snake_case : Optional[int]=True , __snake_case : List[Any]=True , __snake_case : Optional[Any]=False , __snake_case : Dict=11 , __snake_case : List[str]=11 , **__snake_case : int , ):
'''simple docstring'''
UpperCAmelCase_ : int = vocab_size
# Backward compatibility with n_embed kwarg
UpperCAmelCase_ : Union[str, Any] = kwargs.pop('''n_embed''' , __snake_case )
UpperCAmelCase_ : str = hidden_size if n_embed is None else n_embed
UpperCAmelCase_ : Tuple = num_hidden_layers
UpperCAmelCase_ : Tuple = num_attention_heads
UpperCAmelCase_ : Optional[int] = layer_norm_epsilon
UpperCAmelCase_ : int = initializer_range
UpperCAmelCase_ : Optional[int] = use_cache
UpperCAmelCase_ : List[Any] = hidden_dropout
UpperCAmelCase_ : Any = attention_dropout
UpperCAmelCase_ : Tuple = bos_token_id
UpperCAmelCase_ : List[Any] = eos_token_id
UpperCAmelCase_ : Any = num_attention_heads if num_kv_heads is None else num_kv_heads
UpperCAmelCase_ : Optional[int] = alibi
UpperCAmelCase_ : Dict = new_decoder_architecture
UpperCAmelCase_ : List[Any] = multi_query # Ignored when new_decoder_architecture is True
UpperCAmelCase_ : Tuple = parallel_attn
UpperCAmelCase_ : List[Any] = bias
super().__init__(bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return self.hidden_size // self.num_attention_heads
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return not self.alibi
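# Quick check of the derived properties above with falcon-7b-style defaults
# (hidden_size=4544, num_attention_heads=71): head_dim = 4544 // 71 = 64,
# and rotary embeddings are reported as enabled exactly when alibi is False.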
| 641
| 1
|
from __future__ import annotations
__UpperCamelCase : Optional[Any] = list[list[int]]
# assigning initial values to the grid
__UpperCamelCase : Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
__UpperCamelCase : Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def snake_case_ ( __lowercase , __lowercase , __lowercase , __lowercase ):
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
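# Example for the safety check above: on the first grid, placing 1 at
# (row 0, column 1) is safe (no 1 in that row, column, or 3x3 box), while
# placing 2 there is not, since column 1 already contains a 2.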
def snake_case_ ( __lowercase ):
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def snake_case_ ( __lowercase ):
if location := find_empty_location(__lowercase ):
UpperCAmelCase_ , UpperCAmelCase_ : Dict = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 1_0 ):
if is_safe(__lowercase , __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : Optional[Any] = digit
if sudoku(__lowercase ) is not None:
return grid
UpperCAmelCase_ : Union[str, Any] = 0
return None
def snake_case_ ( __lowercase ):
for row in grid:
for cell in row:
print(__lowercase , end=''' ''' )
print()
if __name__ == "__main__":
    # solve each example grid in place and report whether a solution exists
for example_grid in (initial_grid, no_solution):
print('\nExample grid:\n' + '=' * 20)
print_solution(example_grid)
print('\nExample grid solution:')
__UpperCamelCase : int = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('Cannot find a solution.')
| 641
|
def snake_case_ ( __lowercase ):
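    """Reverse every word longer than four characters, leaving shorter words as-is.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    """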
return " ".join(
''''''.join(word[::-1] ) if len(__lowercase ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('Hey wollef sroirraw'))
| 641
| 1
|
def snake_case_ ( __lowercase = 2_0_0 ):
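    """Count the combinations of standard English coins that sum to ``pence``.

    Project Euler problem 31, solved with the classic unbounded coin-change
    DP: looping over coins in the outer loop counts each combination exactly
    once, regardless of order. For example, 5 pence can be made 4 ways:
    5, 2+2+1, 2+1+1+1 and 1+1+1+1+1.
    """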
UpperCAmelCase_ : Union[str, Any] = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 2_0_0]
UpperCAmelCase_ : Union[str, Any] = [0] * (pence + 1)
UpperCAmelCase_ : Optional[int] = 1 # base case: 1 way to make 0 pence
for coin in coins:
for i in range(__lowercase , pence + 1 , 1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 7_3682
| 641
|
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__UpperCamelCase : str = 'true'
def snake_case_ ( __lowercase , __lowercase=8_2 , __lowercase=1_6 ):
set_seed(4_2 )
UpperCAmelCase_ : Optional[int] = RegressionModel()
UpperCAmelCase_ : Optional[int] = deepcopy(__lowercase )
UpperCAmelCase_ : Union[str, Any] = RegressionDataset(length=__lowercase )
UpperCAmelCase_ : Any = DataLoader(__lowercase , batch_size=__lowercase )
model.to(accelerator.device )
UpperCAmelCase_ , UpperCAmelCase_ : Dict = accelerator.prepare(__lowercase , __lowercase )
return model, ddp_model, dataloader
def snake_case_ ( __lowercase , __lowercase=False ):
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''' )
UpperCAmelCase_ : List[Any] = load_dataset('''glue''' , '''mrpc''' , split='''validation''' )
def tokenize_function(__lowercase ):
UpperCAmelCase_ : int = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__lowercase , max_length=__lowercase )
return outputs
with accelerator.main_process_first():
UpperCAmelCase_ : List[str] = dataset.map(
__lowercase , batched=__lowercase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
UpperCAmelCase_ : Any = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__lowercase ):
if use_longest:
return tokenizer.pad(__lowercase , padding='''longest''' , return_tensors='''pt''' )
return tokenizer.pad(__lowercase , padding='''max_length''' , max_length=1_2_8 , return_tensors='''pt''' )
return DataLoader(__lowercase , shuffle=__lowercase , collate_fn=__lowercase , batch_size=1_6 )
def snake_case_ ( __lowercase , __lowercase ):
UpperCAmelCase_ : Optional[int] = Accelerator(dispatch_batches=__lowercase , split_batches=__lowercase )
UpperCAmelCase_ : int = get_dataloader(__lowercase , not dispatch_batches )
UpperCAmelCase_ : Optional[int] = AutoModelForSequenceClassification.from_pretrained(
'''hf-internal-testing/mrpc-bert-base-cased''' , return_dict=__lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(__lowercase , __lowercase )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : Dict = []
for batch in dataloader:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = batch.values()
with torch.no_grad():
UpperCAmelCase_ : List[Any] = model(__lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Dict = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
UpperCAmelCase_ , UpperCAmelCase_ : Any = [], []
for logit, targ in logits_and_targets:
logits.append(__lowercase )
targs.append(__lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = torch.cat(__lowercase ), torch.cat(__lowercase )
return logits, targs
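# Unlike a plain gather(), accelerator.gather_for_metrics() above drops the
# duplicate samples Accelerate adds to pad the final uneven batch across
# processes, so the concatenated logits/targets match the dataset length.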
def snake_case_ ( __lowercase , __lowercase=8_2 , __lowercase=False , __lowercase=False , __lowercase=1_6 ):
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = get_basic_setup(__lowercase , __lowercase , __lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = generate_predictions(__lowercase , __lowercase , __lowercase )
assert (
len(__lowercase ) == num_samples
), F'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(__lowercase )}'''
def snake_case_ ( __lowercase = False , __lowercase = False ):
UpperCAmelCase_ : Optional[Any] = evaluate.load('''glue''' , '''mrpc''' )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = get_mrpc_setup(__lowercase , __lowercase )
# First do baseline
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = setup['''no''']
model.to(__lowercase )
model.eval()
for batch in dataloader:
batch.to(__lowercase )
with torch.inference_mode():
UpperCAmelCase_ : str = model(**__lowercase )
UpperCAmelCase_ : Dict = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=__lowercase , references=batch['''labels'''] )
UpperCAmelCase_ : Optional[int] = metric.compute()
# Then do distributed
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = setup['''ddp''']
model.eval()
for batch in dataloader:
with torch.inference_mode():
UpperCAmelCase_ : Optional[int] = model(**__lowercase )
UpperCAmelCase_ : int = outputs.logits.argmax(dim=-1 )
UpperCAmelCase_ : Optional[int] = batch['''labels''']
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=__lowercase , references=__lowercase )
UpperCAmelCase_ : Dict = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def snake_case_ ( ):
UpperCAmelCase_ : str = Accelerator(split_batches=__lowercase , dispatch_batches=__lowercase )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
    # These are a bit slower, so they should only be run on a GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('''**Testing gather_for_metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' )
test_mrpc(__lowercase , __lowercase )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test torch metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
UpperCAmelCase_ : Optional[Any] = Accelerator(split_batches=__lowercase , dispatch_batches=__lowercase )
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' )
test_torch_metrics(__lowercase , 9_9 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test last batch is not dropped when perfectly divisible**''' )
UpperCAmelCase_ : List[Any] = Accelerator()
test_torch_metrics(__lowercase , 5_1_2 )
accelerator.state._reset_state()
def snake_case_ ( __lowercase ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 641
| 1
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : Union[str, Any] = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 1_8, 2]
UpperCAmelCase_ : List[Any] = True if '''large''' in model_name or '''huge''' in model_name else False
UpperCAmelCase_ : Tuple = True if '''large''' in model_name or '''huge''' in model_name else False
UpperCAmelCase_ : List[Any] = True if '''large''' in model_name or '''huge''' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
UpperCAmelCase_ : List[Any] = [3, 3, 3, 3]
UpperCAmelCase_ : Union[str, Any] = [5, 5, 5, 5]
elif "fl4" in model_name:
UpperCAmelCase_ : Tuple = [4, 4, 4, 4]
UpperCAmelCase_ : List[str] = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
UpperCAmelCase_ : Tuple = [3, 3, 3, 3]
if "lrf" in model_name:
UpperCAmelCase_ : Dict = [3, 3, 3, 3]
else:
UpperCAmelCase_ : Tuple = [2, 2, 2, 2]
if "tiny" in model_name:
UpperCAmelCase_ : Optional[int] = 9_6
elif "small" in model_name:
UpperCAmelCase_ : Tuple = 9_6
elif "base" in model_name:
UpperCAmelCase_ : Optional[Any] = 1_2_8
elif "large" in model_name:
UpperCAmelCase_ : int = 1_9_2
elif "xlarge" in model_name:
UpperCAmelCase_ : Dict = 2_5_6
elif "huge" in model_name:
UpperCAmelCase_ : Union[str, Any] = 3_5_2
# set label information
UpperCAmelCase_ : Dict = '''huggingface/label-files'''
if "large" in model_name or "huge" in model_name:
UpperCAmelCase_ : List[Any] = '''imagenet-22k-id2label.json'''
else:
UpperCAmelCase_ : Union[str, Any] = '''imagenet-1k-id2label.json'''
UpperCAmelCase_ : int = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type='''dataset''' ) , '''r''' ) )
UpperCAmelCase_ : Optional[Any] = {int(__lowercase ): v for k, v in idalabel.items()}
UpperCAmelCase_ : Optional[Any] = {v: k for k, v in idalabel.items()}
UpperCAmelCase_ : List[str] = FocalNetConfig(
embed_dim=__lowercase , depths=__lowercase , focal_levels=__lowercase , focal_windows=__lowercase , use_conv_embed=__lowercase , idalabel=__lowercase , labelaid=__lowercase , use_post_layernorm=__lowercase , use_layerscale=__lowercase , )
return config
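# Example resolutions of the rules above: "focalnet-tiny" yields embed_dim 96
# with depths [2, 2, 6, 2], while the "-lrf" (large receptive field) variants
# differ from their short-receptive-field counterparts only in the per-stage
# focal levels.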
def snake_case_ ( __lowercase ):
if "patch_embed.proj" in name:
UpperCAmelCase_ : int = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
UpperCAmelCase_ : Union[str, Any] = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if "layers" in name:
UpperCAmelCase_ : Any = '''encoder.''' + name
if "encoder.layers" in name:
UpperCAmelCase_ : Union[str, Any] = name.replace('''encoder.layers''' , '''encoder.stages''' )
if "downsample.proj" in name:
UpperCAmelCase_ : int = name.replace('''downsample.proj''' , '''downsample.projection''' )
if "blocks" in name:
UpperCAmelCase_ : List[str] = name.replace('''blocks''' , '''layers''' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
UpperCAmelCase_ : List[str] = name.replace('''modulation.f''' , '''modulation.projection_in''' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
UpperCAmelCase_ : Tuple = name.replace('''modulation.h''' , '''modulation.projection_context''' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
UpperCAmelCase_ : str = name.replace('''modulation.proj''' , '''modulation.projection_out''' )
if name == "norm.weight":
UpperCAmelCase_ : Tuple = '''layernorm.weight'''
if name == "norm.bias":
UpperCAmelCase_ : Dict = '''layernorm.bias'''
if "head" in name:
UpperCAmelCase_ : Tuple = name.replace('''head''' , '''classifier''' )
else:
UpperCAmelCase_ : str = '''focalnet.''' + name
return name
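# Example of the renaming rules above: "patch_embed.proj.weight" becomes
# "focalnet.embeddings.patch_embeddings.projection.weight" (the "focalnet."
# prefix is added because the key is not part of the classification head).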
def snake_case_ ( __lowercase , __lowercase , __lowercase=False ):
# fmt: off
UpperCAmelCase_ : Union[str, Any] = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
UpperCAmelCase_ : List[Any] = model_name_to_url[model_name]
print('''Checkpoint URL: ''' , __lowercase )
UpperCAmelCase_ : List[str] = torch.hub.load_state_dict_from_url(__lowercase , map_location='''cpu''' )['''model''']
# rename keys
for key in state_dict.copy().keys():
UpperCAmelCase_ : int = state_dict.pop(__lowercase )
UpperCAmelCase_ : Optional[Any] = val
UpperCAmelCase_ : Optional[Any] = get_focalnet_config(__lowercase )
UpperCAmelCase_ : Optional[int] = FocalNetForImageClassification(__lowercase )
model.eval()
# load state dict
model.load_state_dict(__lowercase )
# verify conversion
UpperCAmelCase_ : List[str] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
UpperCAmelCase_ : Optional[int] = BitImageProcessor(
do_resize=__lowercase , size={'''shortest_edge''': 2_5_6} , resample=PILImageResampling.BILINEAR , do_center_crop=__lowercase , crop_size=2_2_4 , do_normalize=__lowercase , image_mean=__lowercase , image_std=__lowercase , )
UpperCAmelCase_ : Optional[Any] = Image.open(requests.get(__lowercase , stream=__lowercase ).raw )
UpperCAmelCase_ : Union[str, Any] = processor(images=__lowercase , return_tensors='''pt''' )
UpperCAmelCase_ : List[str] = transforms.Compose(
[
transforms.Resize(2_5_6 ),
transforms.CenterCrop(2_2_4 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
] )
UpperCAmelCase_ : str = image_transforms(__lowercase ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , __lowercase , atol=1e-4 )
UpperCAmelCase_ : Union[str, Any] = model(**__lowercase )
UpperCAmelCase_ : Any = outputs.logits.argmax(-1 ).item()
print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
print('''First values of logits:''' , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
UpperCAmelCase_ : List[Any] = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] )
elif model_name == "focalnet-tiny-lrf":
UpperCAmelCase_ : Optional[Any] = torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] )
elif model_name == "focalnet-small":
UpperCAmelCase_ : Optional[Any] = torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] )
elif model_name == "focalnet-small-lrf":
UpperCAmelCase_ : Union[str, Any] = torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] )
elif model_name == "focalnet-base":
UpperCAmelCase_ : Any = torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] )
elif model_name == "focalnet-base-lrf":
UpperCAmelCase_ : List[Any] = torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] )
assert torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__lowercase )
processor.save_pretrained(__lowercase )
if push_to_hub:
print(F'''Pushing model and processor of {model_name} to the hub...''' )
model.push_to_hub(F'''{model_name}''' )
processor.push_to_hub(F'''{model_name}''' )
if __name__ == "__main__":
__UpperCamelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
__UpperCamelCase : Tuple = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 641
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCamelCase : List[Any] = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[Any] = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[Any] = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
__UpperCamelCase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 641
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__UpperCamelCase : Tuple = {
'configuration_roberta_prelayernorm': [
'ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP',
'RobertaPreLayerNormConfig',
'RobertaPreLayerNormOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[Any] = [
'ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaPreLayerNormForCausalLM',
'RobertaPreLayerNormForMaskedLM',
'RobertaPreLayerNormForMultipleChoice',
'RobertaPreLayerNormForQuestionAnswering',
'RobertaPreLayerNormForSequenceClassification',
'RobertaPreLayerNormForTokenClassification',
'RobertaPreLayerNormModel',
'RobertaPreLayerNormPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[str] = [
'TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaPreLayerNormForCausalLM',
'TFRobertaPreLayerNormForMaskedLM',
'TFRobertaPreLayerNormForMultipleChoice',
'TFRobertaPreLayerNormForQuestionAnswering',
'TFRobertaPreLayerNormForSequenceClassification',
'TFRobertaPreLayerNormForTokenClassification',
'TFRobertaPreLayerNormMainLayer',
'TFRobertaPreLayerNormModel',
'TFRobertaPreLayerNormPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : str = [
'FlaxRobertaPreLayerNormForCausalLM',
'FlaxRobertaPreLayerNormForMaskedLM',
'FlaxRobertaPreLayerNormForMultipleChoice',
'FlaxRobertaPreLayerNormForQuestionAnswering',
'FlaxRobertaPreLayerNormForSequenceClassification',
'FlaxRobertaPreLayerNormForTokenClassification',
'FlaxRobertaPreLayerNormModel',
'FlaxRobertaPreLayerNormPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
__UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 641
|
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self : List[str] , __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : str = data
UpperCAmelCase_ : List[Any] = [0X67_45_23_01, 0Xef_cd_ab_89, 0X98_ba_dc_fe, 0X10_32_54_76, 0Xc3_d2_e1_f0]
@staticmethod
def _lowerCamelCase ( __snake_case : Dict , __snake_case : Dict ):
'''simple docstring'''
return ((n << b) | (n >> (32 - b))) & 0Xff_ff_ff_ff
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = B'''\x80''' + B'''\x00''' * (63 - (len(self.data ) + 8) % 64)
UpperCAmelCase_ : Union[str, Any] = self.data + padding + struct.pack('''>Q''' , 8 * len(self.data ) )
return padded_data
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return [
self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 )
]
def _lowerCamelCase ( self : Dict , __snake_case : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Any = list(struct.unpack('''>16L''' , __snake_case ) ) + [0] * 64
for i in range(16 , 80 ):
UpperCAmelCase_ : str = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 )
return w
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = self.padding()
UpperCAmelCase_ : str = self.split_blocks()
for block in self.blocks:
UpperCAmelCase_ : Any = self.expand_block(__snake_case )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.h
for i in range(0 , 80 ):
if 0 <= i < 20:
UpperCAmelCase_ : Optional[Any] = (b & c) | ((~b) & d)
UpperCAmelCase_ : Optional[Any] = 0X5a_82_79_99
elif 20 <= i < 40:
UpperCAmelCase_ : List[Any] = b ^ c ^ d
UpperCAmelCase_ : str = 0X6e_d9_eb_a1
elif 40 <= i < 60:
UpperCAmelCase_ : str = (b & c) | (b & d) | (c & d)
UpperCAmelCase_ : Optional[int] = 0X8f_1b_bc_dc
elif 60 <= i < 80:
UpperCAmelCase_ : Union[str, Any] = b ^ c ^ d
UpperCAmelCase_ : Dict = 0Xca_62_c1_d6
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = (
self.rotate(__snake_case , 5 ) + f + e + k + expanded_block[i] & 0Xff_ff_ff_ff,
a,
self.rotate(__snake_case , 30 ),
c,
d,
)
UpperCAmelCase_ : Optional[Any] = (
self.h[0] + a & 0Xff_ff_ff_ff,
self.h[1] + b & 0Xff_ff_ff_ff,
self.h[2] + c & 0Xff_ff_ff_ff,
self.h[3] + d & 0Xff_ff_ff_ff,
self.h[4] + e & 0Xff_ff_ff_ff,
)
return ("{:08x}" * 5).format(*self.h )
def snake_case_ ( ):
UpperCAmelCase_ : Tuple = B'''Test String'''
assert SHAaHash(__lowercase ).final_hash() == hashlib.shaa(__lowercase ).hexdigest() # noqa: S324
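# Known-answer vector (FIPS 180-1): the SHA-1 digest of b"abc" is
# a9993e364706816aba3e25717850c26c9cd0d89d; the hash class above reproduces
# it via final_hash(), matching hashlib.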
def snake_case_ ( ):
UpperCAmelCase_ : int = argparse.ArgumentParser(description='''Process some strings or files''' )
parser.add_argument(
'''--string''' , dest='''input_string''' , default='''Hello World!! Welcome to Cryptography''' , help='''Hash the string''' , )
parser.add_argument('''--file''' , dest='''input_file''' , help='''Hash contents of a file''' )
UpperCAmelCase_ : List[Any] = parser.parse_args()
UpperCAmelCase_ : Optional[Any] = args.input_string
    # In any case, the hash input should be a bytestring
if args.input_file:
with open(args.input_file , '''rb''' ) as f:
UpperCAmelCase_ : List[str] = f.read()
else:
UpperCAmelCase_ : Tuple = bytes(__lowercase , '''utf-8''' )
print(SHAaHash(__lowercase ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
| 641
| 1
|
def snake_case_ ( __lowercase ):
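    """Return all primes up to and including ``num`` (sieve of Eratosthenes).

    >>> prime_sieve_eratosthenes(10)
    [2, 3, 5, 7]
    >>> prime_sieve_eratosthenes(2)
    [2]
    """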
if num <= 0:
raise ValueError('''Input must be a positive integer''' )
UpperCAmelCase_ : Any = [True] * (num + 1)
UpperCAmelCase_ : Any = 2
while p * p <= num:
if primes[p]:
for i in range(p * p , num + 1 , __lowercase ):
UpperCAmelCase_ : Optional[Any] = False
p += 1
return [prime for prime in range(2 , num + 1 ) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCamelCase : Optional[Any] = int(input('Enter a positive integer: ').strip())
print(prime_sieve_eratosthenes(user_num))
| 641
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : int = logging.get_logger(__name__)
__UpperCamelCase : Union[str, Any] = {
'facebook/timesformer': 'https://huggingface.co/facebook/timesformer/resolve/main/config.json',
}
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : str = 'timesformer'
def __init__( self : int , __snake_case : Any=224 , __snake_case : str=16 , __snake_case : Any=3 , __snake_case : List[Any]=8 , __snake_case : Dict=768 , __snake_case : Dict=12 , __snake_case : Tuple=12 , __snake_case : Dict=3_072 , __snake_case : str="gelu" , __snake_case : Union[str, Any]=0.0 , __snake_case : List[Any]=0.0 , __snake_case : Tuple=0.02 , __snake_case : Optional[Any]=1E-6 , __snake_case : List[Any]=True , __snake_case : List[str]="divided_space_time" , __snake_case : Optional[int]=0 , **__snake_case : Dict , ):
'''simple docstring'''
super().__init__(**__snake_case )
UpperCAmelCase_ : Optional[int] = image_size
UpperCAmelCase_ : Optional[Any] = patch_size
UpperCAmelCase_ : Any = num_channels
UpperCAmelCase_ : int = num_frames
UpperCAmelCase_ : List[Any] = hidden_size
UpperCAmelCase_ : Any = num_hidden_layers
UpperCAmelCase_ : Dict = num_attention_heads
UpperCAmelCase_ : List[Any] = intermediate_size
UpperCAmelCase_ : str = hidden_act
UpperCAmelCase_ : List[str] = hidden_dropout_prob
UpperCAmelCase_ : List[str] = attention_probs_dropout_prob
UpperCAmelCase_ : Optional[int] = initializer_range
UpperCAmelCase_ : List[str] = layer_norm_eps
UpperCAmelCase_ : str = qkv_bias
UpperCAmelCase_ : Dict = attention_type
UpperCAmelCase_ : str = drop_path_rate
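# "divided_space_time" is the TimeSformer default: each block applies temporal
# self-attention across frames first, then spatial self-attention within each
# frame, which scales much better than joint space-time attention.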
| 641
| 1
|
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Dict , __snake_case : Optional[int] , __snake_case : int=13 , __snake_case : Optional[Any]=7 , __snake_case : Optional[int]=True , __snake_case : Optional[int]=True , __snake_case : Union[str, Any]=True , __snake_case : int=True , __snake_case : List[str]=99 , __snake_case : List[str]=32 , __snake_case : Tuple=5 , __snake_case : int=4 , __snake_case : List[Any]=37 , __snake_case : str="gelu" , __snake_case : Optional[int]=0.1 , __snake_case : List[Any]=0.1 , __snake_case : Optional[int]=512 , __snake_case : Tuple=16 , __snake_case : List[str]=2 , __snake_case : List[Any]=0.02 , __snake_case : Tuple=4 , ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = parent
UpperCAmelCase_ : Tuple = batch_size
UpperCAmelCase_ : Union[str, Any] = seq_length
UpperCAmelCase_ : Tuple = is_training
UpperCAmelCase_ : Union[str, Any] = use_attention_mask
UpperCAmelCase_ : Dict = use_token_type_ids
UpperCAmelCase_ : int = use_labels
UpperCAmelCase_ : Any = vocab_size
UpperCAmelCase_ : Optional[int] = hidden_size
UpperCAmelCase_ : str = num_hidden_layers
UpperCAmelCase_ : Optional[Any] = num_attention_heads
UpperCAmelCase_ : List[str] = intermediate_size
UpperCAmelCase_ : Optional[Any] = hidden_act
UpperCAmelCase_ : List[str] = hidden_dropout_prob
UpperCAmelCase_ : Dict = attention_probs_dropout_prob
UpperCAmelCase_ : List[str] = max_position_embeddings
UpperCAmelCase_ : Union[str, Any] = type_vocab_size
UpperCAmelCase_ : str = type_sequence_label_size
UpperCAmelCase_ : str = initializer_range
UpperCAmelCase_ : List[Any] = num_choices
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : Optional[int] = None
if self.use_attention_mask:
UpperCAmelCase_ : Any = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : List[str] = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=__snake_case , )
return config, input_ids, attention_mask
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = config_and_inputs
UpperCAmelCase_ : int = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class lowerCAmelCase__( snake_case__ , unittest.TestCase ):
'''simple docstring'''
A_ : Optional[int] = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
)
if is_flax_available()
else ()
)
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : int = FlaxDistilBertModelTester(self )
@slow
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
UpperCAmelCase_ : str = model_class_name.from_pretrained('''distilbert-base-uncased''' )
UpperCAmelCase_ : Union[str, Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(__snake_case )
@require_flax
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = FlaxDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
UpperCAmelCase_ : Optional[Any] = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
UpperCAmelCase_ : Tuple = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
UpperCAmelCase_ : str = model(__snake_case , attention_mask=__snake_case )[0]
UpperCAmelCase_ : List[Any] = (1, 11, 768)
self.assertEqual(output.shape , __snake_case )
UpperCAmelCase_ : List[Any] = np.array([[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , __snake_case , atol=1E-4 ) )
| 641
|
import math
import qiskit
def snake_case_ ( __lowercase = 1 , __lowercase = 1 , __lowercase = 1 ):
if (
isinstance(__lowercase , __lowercase )
or isinstance(__lowercase , __lowercase )
or isinstance(__lowercase , __lowercase )
):
raise TypeError('''inputs must be integers.''' )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
        raise ValueError('''inputs must be non-negative.''' )
if (
(math.floor(__lowercase ) != input_a)
or (math.floor(__lowercase ) != input_a)
or (math.floor(__lowercase ) != carry_in)
):
raise ValueError('''inputs must be exact integers.''' )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
        raise ValueError('''inputs must be less than or equal to 2.''' )
# build registers
UpperCAmelCase_ : Any = qiskit.QuantumRegister(4 , '''qr''' )
UpperCAmelCase_ : List[str] = qiskit.ClassicalRegister(2 , '''cr''' )
# list the entries
UpperCAmelCase_ : Any = [input_a, input_a, carry_in]
UpperCAmelCase_ : Dict = qiskit.QuantumCircuit(__lowercase , __lowercase )
for i in range(0 , 3 ):
if entry[i] == 2:
quantum_circuit.h(__lowercase ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(__lowercase ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(__lowercase ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
quantum_circuit.measure([2, 3] , __lowercase ) # measure the last two qbits
UpperCAmelCase_ : Optional[int] = qiskit.Aer.get_backend('''aer_simulator''' )
UpperCAmelCase_ : List[str] = qiskit.execute(__lowercase , __lowercase , shots=1_0_0_0 )
return job.result().get_counts(__lowercase )
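# The returned counts map measured bitstrings to shot frequencies; the two
# classical bits read back as (carry_out, sum). For quantum_full_adder(1, 1, 1)
# the full adder gives sum=1 and carry=1, so all shots should land on "11".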
if __name__ == "__main__":
print(F'Total sum count for state is: {quantum_full_adder(1, 1, 1)}')
| 641
| 1
|
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : str = ['input_features', 'attention_mask']
def __init__( self : int , __snake_case : Any=80 , __snake_case : Union[str, Any]=16_000 , __snake_case : int=0.0 , __snake_case : Tuple=10 , __snake_case : Any=25 , __snake_case : Optional[int]="hamming_window" , __snake_case : Optional[int]=32_768.0 , __snake_case : List[Any]=0.97 , __snake_case : Optional[Any]=1.0 , __snake_case : List[str]=True , __snake_case : Union[str, Any]=True , __snake_case : Dict=False , **__snake_case : List[Any] , ):
'''simple docstring'''
super().__init__(feature_size=__snake_case , sampling_rate=__snake_case , padding_value=__snake_case , **__snake_case )
UpperCAmelCase_ : Any = feature_size
UpperCAmelCase_ : List[str] = sampling_rate
UpperCAmelCase_ : str = padding_value
UpperCAmelCase_ : Optional[Any] = hop_length
UpperCAmelCase_ : Optional[int] = win_length
UpperCAmelCase_ : Union[str, Any] = frame_signal_scale
UpperCAmelCase_ : int = preemphasis_coeff
UpperCAmelCase_ : List[Any] = mel_floor
UpperCAmelCase_ : List[Any] = normalize_means
UpperCAmelCase_ : Tuple = normalize_vars
UpperCAmelCase_ : List[str] = win_function
UpperCAmelCase_ : Optional[Any] = return_attention_mask
UpperCAmelCase_ : Optional[Any] = win_length * sampling_rate // 1_000
UpperCAmelCase_ : Optional[Any] = hop_length * sampling_rate // 1_000
UpperCAmelCase_ : int = optimal_fft_length(self.sample_size )
UpperCAmelCase_ : Union[str, Any] = (self.n_fft // 2) + 1
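    # With the defaults above (16 kHz audio, a 25 ms window and a 10 ms hop)
    # this gives sample_size=400, sample_stride=160, n_fft=512 and n_freqs=257.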
def _lowerCamelCase ( self : int , __snake_case : np.array ):
'''simple docstring'''
if self.win_function == "hamming_window":
UpperCAmelCase_ : List[Any] = window_function(window_length=self.sample_size , name=self.win_function , periodic=__snake_case )
else:
UpperCAmelCase_ : str = window_function(window_length=self.sample_size , name=self.win_function )
UpperCAmelCase_ : List[Any] = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
UpperCAmelCase_ : str = spectrogram(
one_waveform * self.frame_signal_scale , window=__snake_case , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=__snake_case , preemphasis=self.preemphasis_coeff , mel_filters=__snake_case , mel_floor=self.mel_floor , log_mel='''log''' , )
return msfc_features.T
def _lowerCamelCase ( self : int , __snake_case : Optional[int] , __snake_case : Optional[int] , __snake_case : List[str] ):
'''simple docstring'''
# make sure we normalize float32 arrays
if self.normalize_means:
UpperCAmelCase_ : int = x[:input_length].mean(axis=0 )
UpperCAmelCase_ : str = np.subtract(__snake_case , __snake_case )
if self.normalize_vars:
UpperCAmelCase_ : int = x[:input_length].std(axis=0 )
UpperCAmelCase_ : Optional[Any] = np.divide(__snake_case , __snake_case )
if input_length < x.shape[0]:
UpperCAmelCase_ : Union[str, Any] = padding_value
# make sure array is in float32
UpperCAmelCase_ : List[str] = x.astype(np.floataa )
return x
def _lowerCamelCase ( self : str , __snake_case : List[np.ndarray] , __snake_case : Optional[np.ndarray] = None ):
'''simple docstring'''
UpperCAmelCase_ : str = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(__snake_case , __snake_case , self.padding_value ) for x, n in zip(__snake_case , __snake_case )]
def __call__( self : str , __snake_case : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __snake_case : Union[bool, str, PaddingStrategy] = False , __snake_case : Optional[int] = None , __snake_case : bool = False , __snake_case : Optional[int] = None , __snake_case : Optional[bool] = None , __snake_case : Optional[Union[str, TensorType]] = None , __snake_case : Optional[int] = None , **__snake_case : Tuple , ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
f''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
f''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'''It is strongly recommended to pass the ``sampling_rate`` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
UpperCAmelCase_ : Dict = isinstance(__snake_case , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
UpperCAmelCase_ : Tuple = is_batched_numpy or (
isinstance(__snake_case , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
UpperCAmelCase_ : str = [np.asarray(__snake_case , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(__snake_case , np.ndarray ):
UpperCAmelCase_ : str = np.asarray(__snake_case , dtype=np.floataa )
elif isinstance(__snake_case , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
UpperCAmelCase_ : Dict = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
UpperCAmelCase_ : Union[str, Any] = [raw_speech]
# extract fbank features
UpperCAmelCase_ : Any = [self._extract_mfsc_features(__snake_case ) for one_waveform in raw_speech]
# convert into correct format for padding
UpperCAmelCase_ : Any = BatchFeature({'''input_features''': features} )
UpperCAmelCase_ : List[Any] = self.pad(
__snake_case , padding=__snake_case , max_length=__snake_case , truncation=__snake_case , pad_to_multiple_of=__snake_case , return_attention_mask=__snake_case , **__snake_case , )
# make sure list is in array format
UpperCAmelCase_ : List[str] = padded_inputs.get('''input_features''' )
if isinstance(input_features[0] , __snake_case ):
UpperCAmelCase_ : int = [np.asarray(__snake_case , dtype=np.floataa ) for feature in input_features]
UpperCAmelCase_ : List[Any] = padded_inputs.get('''attention_mask''' )
if attention_mask is not None:
UpperCAmelCase_ : Optional[Any] = [np.asarray(__snake_case , dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
UpperCAmelCase_ : str = (
np.array(__snake_case , dtype=np.intaa )
if self._get_padding_strategies(__snake_case , max_length=__snake_case ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
UpperCAmelCase_ : str = self.normalize(
padded_inputs['''input_features'''] , attention_mask=__snake_case )
if return_tensors is not None:
UpperCAmelCase_ : int = padded_inputs.convert_to_tensors(__snake_case )
return padded_inputs
| 641
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Any = logging.get_logger(__name__)
__UpperCamelCase : Dict = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : List[str] = 'gpt_bigcode'
A_ : Optional[Any] = ['past_key_values']
A_ : Optional[int] = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : int , __snake_case : Dict=50_257 , __snake_case : List[str]=1_024 , __snake_case : Dict=768 , __snake_case : Optional[int]=12 , __snake_case : str=12 , __snake_case : List[str]=None , __snake_case : List[str]="gelu_pytorch_tanh" , __snake_case : Optional[Any]=0.1 , __snake_case : str=0.1 , __snake_case : Optional[Any]=0.1 , __snake_case : List[str]=1E-5 , __snake_case : Dict=0.02 , __snake_case : Optional[int]=True , __snake_case : Tuple=True , __snake_case : Optional[Any]=50_256 , __snake_case : Optional[Any]=50_256 , __snake_case : Optional[Any]=True , __snake_case : Optional[Any]=True , __snake_case : List[Any]=True , **__snake_case : List[str] , ):
'''simple docstring'''
UpperCAmelCase_ : Any = vocab_size
UpperCAmelCase_ : int = n_positions
UpperCAmelCase_ : Any = n_embd
UpperCAmelCase_ : Union[str, Any] = n_layer
UpperCAmelCase_ : List[str] = n_head
UpperCAmelCase_ : List[Any] = n_inner
UpperCAmelCase_ : Optional[int] = activation_function
UpperCAmelCase_ : str = resid_pdrop
UpperCAmelCase_ : Optional[Any] = embd_pdrop
UpperCAmelCase_ : str = attn_pdrop
UpperCAmelCase_ : Any = layer_norm_epsilon
UpperCAmelCase_ : List[str] = initializer_range
UpperCAmelCase_ : Optional[int] = scale_attn_weights
UpperCAmelCase_ : Union[str, Any] = use_cache
UpperCAmelCase_ : Dict = attention_softmax_in_fpaa
UpperCAmelCase_ : Union[str, Any] = scale_attention_softmax_in_fpaa
UpperCAmelCase_ : Optional[int] = multi_query
UpperCAmelCase_ : Optional[Any] = bos_token_id
UpperCAmelCase_ : Tuple = eos_token_id
super().__init__(bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
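# multi_query=True enables multi-query attention (the SantaCoder/StarCoder
# setting): all attention heads share a single key/value head, which shrinks
# the KV cache and speeds up inference at large batch sizes.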
| 641
| 1
|
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
'The RoBERTa Model transformer with early exiting (DeeRoBERTa). ' , snake_case__ , )
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : List[Any] = RobertaConfig
A_ : List[Any] = 'roberta'
def __init__( self : str , __snake_case : Tuple ):
'''simple docstring'''
super().__init__(__snake_case )
UpperCAmelCase_ : Tuple = RobertaEmbeddings(__snake_case )
self.init_weights()
@add_start_docstrings(
'RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ' , snake_case__ , )
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Optional[int] = RobertaConfig
A_ : int = 'roberta'
def __init__( self : str , __snake_case : Optional[int] ):
'''simple docstring'''
super().__init__(__snake_case )
UpperCAmelCase_ : int = config.num_labels
UpperCAmelCase_ : Dict = config.num_hidden_layers
UpperCAmelCase_ : Dict = DeeRobertaModel(__snake_case )
UpperCAmelCase_ : Optional[int] = nn.Dropout(config.hidden_dropout_prob )
UpperCAmelCase_ : str = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(__snake_case )
def _lowerCamelCase ( self : List[Any] , __snake_case : int=None , __snake_case : Optional[Any]=None , __snake_case : int=None , __snake_case : Union[str, Any]=None , __snake_case : List[str]=None , __snake_case : Optional[int]=None , __snake_case : List[Any]=None , __snake_case : Optional[int]=-1 , __snake_case : List[str]=False , ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = self.num_layers
try:
UpperCAmelCase_ : List[Any] = self.roberta(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , position_ids=__snake_case , head_mask=__snake_case , inputs_embeds=__snake_case , )
UpperCAmelCase_ : Optional[int] = outputs[1]
UpperCAmelCase_ : List[Any] = self.dropout(__snake_case )
UpperCAmelCase_ : str = self.classifier(__snake_case )
UpperCAmelCase_ : List[Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
UpperCAmelCase_ : Optional[Any] = e.message
UpperCAmelCase_ : Tuple = e.exit_layer
UpperCAmelCase_ : Any = outputs[0]
if not self.training:
UpperCAmelCase_ : Tuple = entropy(__snake_case )
UpperCAmelCase_ : Optional[int] = []
UpperCAmelCase_ : Optional[int] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
UpperCAmelCase_ : Optional[int] = MSELoss()
UpperCAmelCase_ : Tuple = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
UpperCAmelCase_ : Any = CrossEntropyLoss()
UpperCAmelCase_ : Tuple = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
UpperCAmelCase_ : int = []
for highway_exit in outputs[-1]:
UpperCAmelCase_ : Optional[int] = highway_exit[0]
if not self.training:
highway_logits_all.append(__snake_case )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
UpperCAmelCase_ : Union[str, Any] = MSELoss()
UpperCAmelCase_ : str = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
UpperCAmelCase_ : Tuple = CrossEntropyLoss()
UpperCAmelCase_ : Optional[Any] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(__snake_case )
if train_highway:
UpperCAmelCase_ : Optional[int] = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
UpperCAmelCase_ : Optional[Any] = (loss,) + outputs
if not self.training:
UpperCAmelCase_ : Optional[int] = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
UpperCAmelCase_ : List[Any] = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
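# Each "highway" exit attaches a small classifier to an intermediate layer.
# At inference time, DeeBERT-style early exiting raises HighwayException from
# inside the encoder once an exit is confident enough (low entropy), and the
# handler above recovers that early prediction instead of the final layer's.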
| 641
|
import fire
from utils import calculate_rouge, save_json
def snake_case_ ( __lowercase , __lowercase , __lowercase=None , **__lowercase ):
UpperCAmelCase_ : Tuple = [x.strip() for x in open(__lowercase ).readlines()]
UpperCAmelCase_ : Dict = [x.strip() for x in open(__lowercase ).readlines()][: len(__lowercase )]
UpperCAmelCase_ : int = calculate_rouge(__lowercase , __lowercase , **__lowercase )
if save_path is not None:
save_json(__lowercase , __lowercase , indent=__lowercase )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 641
| 1
|
from __future__ import annotations
def snake_case_ ( __lowercase , __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : List[str] = []
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = input_list[low:mid], input_list[mid : high + 1]
while left and right:
result.append((left if left[0] <= right[0] else right).pop(0 ) )
    # write the merged run (plus any unconsumed tail) back into the original list
    input_list[low : high + 1] = result + left + right
return input_list
def snake_case_ ( __lowercase ):
if len(__lowercase ) <= 1:
return input_list
UpperCAmelCase_ : Tuple = list(__lowercase )
# iteration for two-way merging
UpperCAmelCase_ : Union[str, Any] = 2
while p <= len(__lowercase ):
# getting low, high and middle value for merge-sort of single list
for i in range(0 , len(__lowercase ) , __lowercase ):
UpperCAmelCase_ : str = i
UpperCAmelCase_ : List[Any] = i + p - 1
UpperCAmelCase_ : Any = (low + high + 1) // 2
UpperCAmelCase_ : Union[str, Any] = merge(__lowercase , __lowercase , __lowercase , __lowercase )
# final merge of last two parts
if p * 2 >= len(__lowercase ):
UpperCAmelCase_ : List[str] = i
UpperCAmelCase_ : str = merge(__lowercase , 0 , __lowercase , len(__lowercase ) - 1 )
break
p *= 2
return input_list
if __name__ == "__main__":
__UpperCamelCase : int = input('Enter numbers separated by a comma:\n').strip()
if user_input == "":
__UpperCamelCase : Optional[int] = []
else:
__UpperCamelCase : List[str] = [int(item.strip()) for item in user_input.split(',')]
print(iter_merge_sort(unsorted))
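# Hedged worked example, not part of the original file: with the merge step writing its
# result back into `input_list[low : high + 1]`, sorting [9, 2, 7, 4] proceeds as
#   p = 2: merge [9|2] and [7|4] pairwise -> [2, 9, 4, 7]
#   final merge of the two halves         -> [2, 4, 7, 9]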
| 641
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self : int , __snake_case : List[Any] , __snake_case : List[Any]=13 , __snake_case : str=7 , __snake_case : Union[str, Any]=True , __snake_case : List[Any]=True , __snake_case : str=True , __snake_case : Optional[int]=True , __snake_case : Optional[int]=True , __snake_case : List[str]=False , __snake_case : List[str]=False , __snake_case : Tuple=False , __snake_case : List[str]=2 , __snake_case : Optional[int]=99 , __snake_case : Tuple=0 , __snake_case : int=32 , __snake_case : Optional[int]=5 , __snake_case : str=4 , __snake_case : str=0.1 , __snake_case : Optional[int]=0.1 , __snake_case : List[str]=512 , __snake_case : Tuple=2 , __snake_case : List[Any]=0.02 , __snake_case : Any=2 , __snake_case : Optional[int]=4 , __snake_case : Optional[Any]="last" , __snake_case : Dict=True , __snake_case : Any=None , __snake_case : str=0 , ):
'''simple docstring'''
UpperCAmelCase_ : int = parent
UpperCAmelCase_ : Optional[Any] = batch_size
UpperCAmelCase_ : Union[str, Any] = seq_length
UpperCAmelCase_ : List[Any] = is_training
UpperCAmelCase_ : List[Any] = use_input_lengths
UpperCAmelCase_ : Dict = use_token_type_ids
UpperCAmelCase_ : Union[str, Any] = use_labels
UpperCAmelCase_ : int = gelu_activation
UpperCAmelCase_ : str = sinusoidal_embeddings
UpperCAmelCase_ : List[str] = causal
UpperCAmelCase_ : Tuple = asm
UpperCAmelCase_ : List[Any] = n_langs
UpperCAmelCase_ : Union[str, Any] = vocab_size
UpperCAmelCase_ : Any = n_special
UpperCAmelCase_ : Dict = hidden_size
UpperCAmelCase_ : Dict = num_hidden_layers
UpperCAmelCase_ : str = num_attention_heads
UpperCAmelCase_ : List[str] = hidden_dropout_prob
UpperCAmelCase_ : Any = attention_probs_dropout_prob
UpperCAmelCase_ : Tuple = max_position_embeddings
UpperCAmelCase_ : Union[str, Any] = type_sequence_label_size
UpperCAmelCase_ : Union[str, Any] = initializer_range
UpperCAmelCase_ : Tuple = num_labels
UpperCAmelCase_ : List[Any] = num_choices
UpperCAmelCase_ : Any = summary_type
UpperCAmelCase_ : Optional[int] = use_proj
UpperCAmelCase_ : List[str] = scope
UpperCAmelCase_ : List[str] = bos_token_id
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : Tuple = None
if self.use_input_lengths:
UpperCAmelCase_ : List[str] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
UpperCAmelCase_ : int = None
if self.use_token_type_ids:
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : str = None
if self.use_labels:
UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ : List[Any] = ids_tensor([self.batch_size] , 2 ).float()
UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ : List[Any] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def _lowerCamelCase ( self : Optional[Any] , __snake_case : int , __snake_case : int , __snake_case : List[Any] , __snake_case : List[Any] , __snake_case : List[str] , __snake_case : List[str] , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Dict , ):
'''simple docstring'''
UpperCAmelCase_ : Any = XLMModel(config=__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Optional[int] = model(__snake_case , lengths=__snake_case , langs=__snake_case )
UpperCAmelCase_ : Any = model(__snake_case , langs=__snake_case )
UpperCAmelCase_ : Any = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self : str , __snake_case : Dict , __snake_case : Any , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : str , __snake_case : Tuple , __snake_case : Union[str, Any] , __snake_case : int , ):
'''simple docstring'''
UpperCAmelCase_ : Any = XLMWithLMHeadModel(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self : Optional[int] , __snake_case : Tuple , __snake_case : Any , __snake_case : Tuple , __snake_case : List[str] , __snake_case : Optional[int] , __snake_case : List[str] , __snake_case : Optional[Any] , __snake_case : str , __snake_case : List[str] , ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = XLMForQuestionAnsweringSimple(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Optional[int] = model(__snake_case )
UpperCAmelCase_ : Any = model(__snake_case , start_positions=__snake_case , end_positions=__snake_case )
UpperCAmelCase_ : Optional[Any] = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self : Any , __snake_case : int , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : Tuple , __snake_case : str , __snake_case : Tuple , __snake_case : int , ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = XLMForQuestionAnswering(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(__snake_case )
UpperCAmelCase_ : List[str] = model(
__snake_case , start_positions=__snake_case , end_positions=__snake_case , cls_index=__snake_case , is_impossible=__snake_case , p_mask=__snake_case , )
UpperCAmelCase_ : Optional[Any] = model(
__snake_case , start_positions=__snake_case , end_positions=__snake_case , cls_index=__snake_case , is_impossible=__snake_case , )
((UpperCAmelCase_) , ) : Union[str, Any] = result_with_labels.to_tuple()
UpperCAmelCase_ : Optional[int] = model(__snake_case , start_positions=__snake_case , end_positions=__snake_case )
((UpperCAmelCase_) , ) : str = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def _lowerCamelCase ( self : str , __snake_case : Dict , __snake_case : str , __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : Optional[int] , __snake_case : Dict , __snake_case : List[str] , __snake_case : Optional[Any] , __snake_case : Any , ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = XLMForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(__snake_case )
UpperCAmelCase_ : Optional[int] = model(__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowerCamelCase ( self : Any , __snake_case : Union[str, Any] , __snake_case : Any , __snake_case : List[str] , __snake_case : Optional[Any] , __snake_case : Tuple , __snake_case : str , __snake_case : Optional[int] , __snake_case : Tuple , __snake_case : int , ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = self.num_labels
UpperCAmelCase_ : Optional[int] = XLMForTokenClassification(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : List[str] = model(__snake_case , attention_mask=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self : List[Any] , __snake_case : str , __snake_case : List[str] , __snake_case : Any , __snake_case : Tuple , __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : Dict , __snake_case : Optional[int] , ):
'''simple docstring'''
UpperCAmelCase_ : int = self.num_choices
UpperCAmelCase_ : int = XLMForMultipleChoice(config=__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ : Any = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = self.prepare_config_and_inputs()
( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , ) : Union[str, Any] = config_and_inputs
UpperCAmelCase_ : str = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths}
return config, inputs_dict
@require_torch
class lowerCAmelCase__( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase ):
'''simple docstring'''
A_ : Dict = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
A_ : List[str] = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
A_ : Optional[int] = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def _lowerCamelCase ( self : str , __snake_case : Optional[int] , __snake_case : Optional[Any] , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Optional[Any] ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _lowerCamelCase ( self : Union[str, Any] , __snake_case : Any , __snake_case : List[Any] , __snake_case : str=False ):
'''simple docstring'''
UpperCAmelCase_ : int = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
UpperCAmelCase_ : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case )
UpperCAmelCase_ : Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case )
return inputs_dict
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = XLMModelTester(self )
UpperCAmelCase_ : List[Any] = ConfigTester(self , config_class=__snake_case , emb_dim=37 )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*__snake_case )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*__snake_case )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*__snake_case )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*__snake_case )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*__snake_case )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*__snake_case )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*__snake_case )
def _lowerCamelCase ( self : str , __snake_case : str , __snake_case : Union[str, Any] , __snake_case : List[Any] , __snake_case : str , __snake_case : Optional[int] , __snake_case : Union[str, Any]=False , __snake_case : Optional[Any]=1 ):
'''simple docstring'''
self.assertIsInstance(__snake_case , __snake_case )
self.assertListEqual(
[isinstance(__snake_case , __snake_case ) for iter_attentions in attentions] , [True] * len(__snake_case ) )
self.assertEqual(len(__snake_case ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(__snake_case ):
# adds PAD dummy token
UpperCAmelCase_ : Dict = min_length + idx + 1
UpperCAmelCase_ : List[Any] = min_length + idx + 1
UpperCAmelCase_ : Optional[int] = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(__snake_case ) )
def _lowerCamelCase ( self : List[Any] , __snake_case : List[str] , __snake_case : int , __snake_case : Optional[int] , __snake_case : Dict , __snake_case : Any , __snake_case : Optional[Any]=False , __snake_case : str=1 ):
'''simple docstring'''
self.assertIsInstance(__snake_case , __snake_case )
self.assertListEqual(
[isinstance(__snake_case , __snake_case ) for iter_hidden_states in hidden_states] , [True] * len(__snake_case ) , )
self.assertEqual(len(__snake_case ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(__snake_case ):
# adds PAD dummy token
UpperCAmelCase_ : str = min_length + idx + 1
UpperCAmelCase_ : int = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(__snake_case ) , )
pass
@slow
def _lowerCamelCase ( self : int ):
'''simple docstring'''
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Any = XLMModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@require_torch
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = XLMWithLMHeadModel.from_pretrained('''xlm-mlm-en-2048''' )
model.to(__snake_case )
UpperCAmelCase_ : str = torch.tensor([[14, 447]] , dtype=torch.long , device=__snake_case ) # the president
UpperCAmelCase_ : Union[str, Any] = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
UpperCAmelCase_ : Dict = model.generate(__snake_case , do_sample=__snake_case )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , __snake_case )
| 641
| 1
|
import math
def snake_case_ ( __lowercase , __lowercase ):
return math.pow(__lowercase , 2 ) - a
def snake_case_ ( __lowercase ):
return 2 * x
def snake_case_ ( __lowercase ):
start = 2.0
while start <= a:
    start = math.pow(start , 2 )  # keep squaring until the initial point exceeds a
return start
def snake_case_ ( __lowercase , __lowercase = 9_9_9_9 , __lowercase = 0.0_0_0_0_0_0_0_0_0_0_0_0_0_1 ):
if a < 0:
raise ValueError('''math domain error''' )
UpperCAmelCase_ : Dict = get_initial_point(__lowercase )
for _ in range(__lowercase ):
UpperCAmelCase_ : Optional[int] = value
UpperCAmelCase_ : str = value - fx(__lowercase , __lowercase ) / fx_derivative(__lowercase )
if abs(prev_value - value ) < tolerance:
return value
return value
if __name__ == "__main__":
from doctest import testmod
testmod()
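# Hedged worked example, not part of the original file: the iteration above is Newton's
# method on f(x) = x**2 - a, i.e. x_next = x - (x**2 - a) / (2 * x). The initial point is
# the first repeated square of 2.0 exceeding a (256.0 for a = 25), from which the
# iteration decreases monotonically to sqrt(25) = 5.0 within the default tolerance.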
| 641
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__UpperCamelCase : Dict = R'\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `" / "`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `" // "`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `"wiki_dpr"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `"train"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `"compressed"`)\n The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and\n `"compressed"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a "dummy" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n'
@add_start_docstrings(snake_case__ )
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Union[str, Any] = 'rag'
A_ : Tuple = True
def __init__( self : int , __snake_case : List[str]=None , __snake_case : List[Any]=True , __snake_case : Optional[int]=None , __snake_case : Union[str, Any]=None , __snake_case : int=None , __snake_case : Any=None , __snake_case : Optional[int]=None , __snake_case : Optional[int]=" / " , __snake_case : Any=" // " , __snake_case : Tuple=5 , __snake_case : Union[str, Any]=300 , __snake_case : Any=768 , __snake_case : Tuple=8 , __snake_case : int="wiki_dpr" , __snake_case : Optional[int]="train" , __snake_case : Tuple="compressed" , __snake_case : Optional[int]=None , __snake_case : List[Any]=None , __snake_case : Optional[int]=False , __snake_case : str=False , __snake_case : Dict=0.0 , __snake_case : Union[str, Any]=True , __snake_case : Union[str, Any]=False , __snake_case : str=False , __snake_case : str=False , __snake_case : Optional[Any]=True , __snake_case : int=None , **__snake_case : str , ):
'''simple docstring'''
super().__init__(
bos_token_id=__snake_case , pad_token_id=__snake_case , eos_token_id=__snake_case , decoder_start_token_id=__snake_case , forced_eos_token_id=__snake_case , is_encoder_decoder=__snake_case , prefix=__snake_case , vocab_size=__snake_case , **__snake_case , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
UpperCAmelCase_ : Dict = kwargs.pop('''question_encoder''' )
UpperCAmelCase_ : List[Any] = question_encoder_config.pop('''model_type''' )
UpperCAmelCase_ : Any = kwargs.pop('''generator''' )
UpperCAmelCase_ : Union[str, Any] = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
UpperCAmelCase_ : Any = AutoConfig.for_model(__snake_case , **__snake_case )
UpperCAmelCase_ : int = AutoConfig.for_model(__snake_case , **__snake_case )
UpperCAmelCase_ : Any = reduce_loss
UpperCAmelCase_ : List[Any] = label_smoothing
UpperCAmelCase_ : Tuple = exclude_bos_score
UpperCAmelCase_ : int = do_marginalize
UpperCAmelCase_ : Tuple = title_sep
UpperCAmelCase_ : Union[str, Any] = doc_sep
UpperCAmelCase_ : Any = n_docs
UpperCAmelCase_ : Optional[int] = max_combined_length
UpperCAmelCase_ : Any = dataset
UpperCAmelCase_ : List[Any] = dataset_split
UpperCAmelCase_ : Union[str, Any] = index_name
UpperCAmelCase_ : List[str] = retrieval_vector_size
UpperCAmelCase_ : Optional[Any] = retrieval_batch_size
UpperCAmelCase_ : Optional[int] = passages_path
UpperCAmelCase_ : Optional[Any] = index_path
UpperCAmelCase_ : List[Any] = use_dummy_dataset
UpperCAmelCase_ : int = output_retrieved
UpperCAmelCase_ : int = do_deduplication
UpperCAmelCase_ : Optional[int] = use_cache
if self.forced_eos_token_id is None:
UpperCAmelCase_ : int = getattr(self.generator , '''forced_eos_token_id''' , __snake_case )
@classmethod
def _lowerCamelCase ( cls : Optional[Any] , __snake_case : PretrainedConfig , __snake_case : PretrainedConfig , **__snake_case : str ):
'''simple docstring'''
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **__snake_case )
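# Hedged usage sketch, not part of the original file: upstream this classmethod is named
# `from_question_encoder_generator_configs`, so composing the joint config would look
# roughly like
#   rag_config = RagConfig.from_question_encoder_generator_configs(
#       question_encoder_config, generator_config, n_docs=5
#   )
# where the two sub-config variables are illustrative assumptions.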
def _lowerCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = copy.deepcopy(self.__dict__ )
UpperCAmelCase_ : Optional[int] = self.question_encoder.to_dict()
UpperCAmelCase_ : Dict = self.generator.to_dict()
UpperCAmelCase_ : Optional[Any] = self.__class__.model_type
return output
| 641
| 1
|
from pathlib import Path
import numpy as np
from PIL import Image
def snake_case_ ( __lowercase ):
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
return 0.2_9_8_9 * r + 0.5_8_7_0 * g + 0.1_1_4_0 * b
def snake_case_ ( __lowercase ):
return (gray > 1_2_7) & (gray <= 2_5_5)
def snake_case_ ( __lowercase , __lowercase ):
UpperCAmelCase_ : Union[str, Any] = np.zeros_like(__lowercase )
UpperCAmelCase_ : Dict = np.zeros(
(image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
# Copy image to padded image
UpperCAmelCase_ : Optional[int] = image
# Iterate over image & apply kernel
for x in range(image.shape[1] ):
for y in range(image.shape[0] ):
UpperCAmelCase_ : Dict = (
kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
).sum()
UpperCAmelCase_ : List[str] = int(summation > 0 )
return output
if __name__ == "__main__":
# read original image
__UpperCamelCase : int = Path(__file__).resolve().parent / 'image_data' / 'lena.jpg'
__UpperCamelCase : Optional[Any] = np.array(Image.open(lena_path))
# kernel to be applied
__UpperCamelCase : Optional[Any] = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
__UpperCamelCase : Any = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
# Save the output image
__UpperCamelCase : Union[str, Any] = Image.fromarray(output).convert('RGB')
pil_img.save('result_dilation.png')
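# Hedged worked example, not part of the original file: dilating a single foreground
# pixel with the cross-shaped structuring element above grows it into a cross:
#   [[0, 0, 0],        [[0, 1, 0],
#    [0, 1, 0],   ->    [1, 1, 1],
#    [0, 0, 0]]         [0, 1, 0]]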
| 641
|
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def snake_case_ ( ):
UpperCAmelCase_ : str = HfArgumentParser(__lowercase )
UpperCAmelCase_ : Optional[Any] = parser.parse_args_into_dataclasses()[0]
UpperCAmelCase_ : Optional[int] = TensorFlowBenchmark(args=__lowercase )
try:
UpperCAmelCase_ : List[Any] = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
UpperCAmelCase_ : List[Any] = '''Arg --no_{0} is no longer used, please use --no-{0} instead.'''
UpperCAmelCase_ : List[str] = ''' '''.join(str(__lowercase ).split(''' ''' )[:-1] )
UpperCAmelCase_ : Optional[int] = ''''''
UpperCAmelCase_ : Dict = eval(str(__lowercase ).split(''' ''' )[-1] )
UpperCAmelCase_ : int = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(__lowercase )
if len(__lowercase ) > 0:
UpperCAmelCase_ : Tuple = full_error_msg + begin_error_msg + str(__lowercase )
raise ValueError(__lowercase )
benchmark.run()
if __name__ == "__main__":
main()
| 641
| 1
|
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCAmelCase__( snake_case__ , unittest.TestCase ):
'''simple docstring'''
A_ : Dict = XLMTokenizer
A_ : Dict = False
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase_ : Optional[Any] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
UpperCAmelCase_ : Dict = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
UpperCAmelCase_ : str = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
UpperCAmelCase_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCAmelCase_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' ) as fp:
fp.write(json.dumps(__snake_case ) )
with open(self.merges_file , '''w''' ) as fp:
fp.write('''\n'''.join(__snake_case ) )
def _lowerCamelCase ( self : Optional[int] , __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = '''lower newer'''
UpperCAmelCase_ : Union[str, Any] = '''lower newer'''
return input_text, output_text
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : int = XLMTokenizer(self.vocab_file , self.merges_file )
UpperCAmelCase_ : Union[str, Any] = '''lower'''
UpperCAmelCase_ : Optional[Any] = ['''low''', '''er</w>''']
UpperCAmelCase_ : Union[str, Any] = tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
UpperCAmelCase_ : str = tokens + ['''<unk>''']
UpperCAmelCase_ : Any = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case )
@slow
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = XLMTokenizer.from_pretrained('''xlm-mlm-en-2048''' )
UpperCAmelCase_ : Union[str, Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=__snake_case )
UpperCAmelCase_ : str = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__snake_case )
UpperCAmelCase_ : Optional[int] = tokenizer.build_inputs_with_special_tokens(__snake_case )
UpperCAmelCase_ : str = tokenizer.build_inputs_with_special_tokens(__snake_case , __snake_case )
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1]
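# Hedged note, not part of the original file: the two assertions encode XLM's sequence
# format, with index 0 acting as the start token and index 1 as the separator, i.e.
# `<s> A </s>` for a single sequence and `<s> A </s> B </s>` for a pair.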
| 641
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : str = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : List[Any] = 'unispeech-sat'
def __init__( self : int , __snake_case : Optional[int]=32 , __snake_case : Dict=768 , __snake_case : Optional[Any]=12 , __snake_case : Optional[int]=12 , __snake_case : Dict=3_072 , __snake_case : List[str]="gelu" , __snake_case : Any=0.1 , __snake_case : Tuple=0.1 , __snake_case : Optional[int]=0.1 , __snake_case : Tuple=0.0 , __snake_case : List[Any]=0.0 , __snake_case : Tuple=0.1 , __snake_case : Union[str, Any]=0.1 , __snake_case : Dict=0.02 , __snake_case : Optional[Any]=1E-5 , __snake_case : Optional[int]="group" , __snake_case : str="gelu" , __snake_case : Union[str, Any]=(512, 512, 512, 512, 512, 512, 512) , __snake_case : str=(5, 2, 2, 2, 2, 2, 2) , __snake_case : Tuple=(10, 3, 3, 3, 3, 2, 2) , __snake_case : int=False , __snake_case : Optional[int]=128 , __snake_case : Any=16 , __snake_case : Union[str, Any]=False , __snake_case : Union[str, Any]=True , __snake_case : List[Any]=0.05 , __snake_case : Dict=10 , __snake_case : int=2 , __snake_case : Optional[Any]=0.0 , __snake_case : Optional[int]=10 , __snake_case : List[Any]=0 , __snake_case : Optional[int]=320 , __snake_case : int=2 , __snake_case : Any=0.1 , __snake_case : Optional[int]=100 , __snake_case : Tuple=256 , __snake_case : List[str]=256 , __snake_case : List[Any]=0.1 , __snake_case : Tuple="mean" , __snake_case : List[Any]=False , __snake_case : List[str]=False , __snake_case : Optional[Any]=256 , __snake_case : Tuple=(512, 512, 512, 512, 1_500) , __snake_case : Optional[int]=(5, 3, 3, 1, 1) , __snake_case : Any=(1, 2, 3, 1, 1) , __snake_case : int=512 , __snake_case : Optional[int]=0 , __snake_case : Dict=1 , __snake_case : Tuple=2 , __snake_case : Union[str, Any]=504 , **__snake_case : List[str] , ):
'''simple docstring'''
super().__init__(**__snake_case , pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case )
UpperCAmelCase_ : Union[str, Any] = hidden_size
UpperCAmelCase_ : int = feat_extract_norm
UpperCAmelCase_ : Dict = feat_extract_activation
UpperCAmelCase_ : Union[str, Any] = list(__snake_case )
UpperCAmelCase_ : List[str] = list(__snake_case )
UpperCAmelCase_ : Any = list(__snake_case )
UpperCAmelCase_ : Any = conv_bias
UpperCAmelCase_ : List[str] = num_conv_pos_embeddings
UpperCAmelCase_ : Dict = num_conv_pos_embedding_groups
UpperCAmelCase_ : Optional[int] = len(self.conv_dim )
UpperCAmelCase_ : List[str] = num_hidden_layers
UpperCAmelCase_ : Dict = intermediate_size
UpperCAmelCase_ : Union[str, Any] = hidden_act
UpperCAmelCase_ : str = num_attention_heads
UpperCAmelCase_ : Union[str, Any] = hidden_dropout
UpperCAmelCase_ : List[str] = attention_dropout
UpperCAmelCase_ : Optional[Any] = activation_dropout
UpperCAmelCase_ : Dict = feat_proj_dropout
UpperCAmelCase_ : Optional[Any] = final_dropout
UpperCAmelCase_ : List[Any] = layerdrop
UpperCAmelCase_ : int = layer_norm_eps
UpperCAmelCase_ : int = initializer_range
UpperCAmelCase_ : List[Any] = vocab_size
UpperCAmelCase_ : int = num_clusters
UpperCAmelCase_ : int = do_stable_layer_norm
UpperCAmelCase_ : Any = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase_ : int = apply_spec_augment
UpperCAmelCase_ : Optional[Any] = mask_time_prob
UpperCAmelCase_ : str = mask_time_length
UpperCAmelCase_ : Any = mask_time_min_masks
UpperCAmelCase_ : str = mask_feature_prob
UpperCAmelCase_ : str = mask_feature_length
UpperCAmelCase_ : Tuple = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
UpperCAmelCase_ : Optional[int] = num_codevectors_per_group
UpperCAmelCase_ : int = num_codevector_groups
UpperCAmelCase_ : List[str] = contrastive_logits_temperature
UpperCAmelCase_ : int = feat_quantizer_dropout
UpperCAmelCase_ : List[str] = num_negatives
UpperCAmelCase_ : Any = codevector_dim
UpperCAmelCase_ : Tuple = proj_codevector_dim
UpperCAmelCase_ : Union[str, Any] = diversity_loss_weight
# ctc loss
UpperCAmelCase_ : Any = ctc_loss_reduction
UpperCAmelCase_ : Optional[Any] = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCAmelCase_ : Optional[int] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase_ : Dict = list(__snake_case )
UpperCAmelCase_ : Dict = list(__snake_case )
UpperCAmelCase_ : Dict = list(__snake_case )
UpperCAmelCase_ : Union[str, Any] = xvector_output_dim
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
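# Hedged worked example, not part of the original file: with the default `conv_stride`
# of (5, 2, 2, 2, 2, 2, 2), the product returned above is 5 * 2**6 = 320, i.e. the
# convolutional feature encoder emits one frame per 320 raw audio samples.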
| 641
| 1
|
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
__UpperCamelCase : Any = collections.namedtuple('_Datasets', ['train', 'validation', 'test'])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
__UpperCamelCase : int = 'https://storage.googleapis.com/cvdf-datasets/mnist/'
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : int = numpy.dtype(numpy.uintaa ).newbyteorder('''>''' )
return numpy.frombuffer(bytestream.read(4 ) , dtype=__lowercase )[0]
@deprecated(__lowercase , '''Please use tf.data to implement this functionality.''' )
def snake_case_ ( __lowercase ):
print('''Extracting''' , f.name )
with gzip.GzipFile(fileobj=__lowercase ) as bytestream:
UpperCAmelCase_ : int = _readaa(__lowercase )
if magic != 2_0_5_1:
raise ValueError(
'''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name) )
UpperCAmelCase_ : int = _readaa(__lowercase )
UpperCAmelCase_ : Optional[Any] = _readaa(__lowercase )
UpperCAmelCase_ : str = _readaa(__lowercase )
UpperCAmelCase_ : Optional[Any] = bytestream.read(rows * cols * num_images )
UpperCAmelCase_ : Any = numpy.frombuffer(__lowercase , dtype=numpy.uinta )
UpperCAmelCase_ : Optional[int] = data.reshape(__lowercase , __lowercase , __lowercase , 1 )
return data
@deprecated(__lowercase , '''Please use tf.one_hot on tensors.''' )
def snake_case_ ( __lowercase , __lowercase ):
UpperCAmelCase_ : str = labels_dense.shape[0]
UpperCAmelCase_ : Union[str, Any] = numpy.arange(__lowercase ) * num_classes
UpperCAmelCase_ : int = numpy.zeros((num_labels, num_classes) )
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1  # set the hot position per row
return labels_one_hot
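# Hedged worked example, not part of the original file: with num_classes = 3 and dense
# labels [2, 0], the offsets are arange(2) * 3 = [0, 3], so flat positions 2 and 3 are
# set, yielding [[0, 0, 1], [1, 0, 0]].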
@deprecated(__lowercase , '''Please use tf.data to implement this functionality.''' )
def snake_case_ ( __lowercase , __lowercase=False , __lowercase=1_0 ):
print('''Extracting''' , f.name )
with gzip.GzipFile(fileobj=__lowercase ) as bytestream:
UpperCAmelCase_ : Any = _readaa(__lowercase )
if magic != 2_0_4_9:
raise ValueError(
'''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name) )
UpperCAmelCase_ : Any = _readaa(__lowercase )
UpperCAmelCase_ : List[Any] = bytestream.read(__lowercase )
UpperCAmelCase_ : Union[str, Any] = numpy.frombuffer(__lowercase , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(__lowercase , __lowercase )
return labels
class lowerCAmelCase__:
'''simple docstring'''
@deprecated(
__snake_case , '''Please use alternatives such as official/mnist/_DataSet.py'''
''' from tensorflow/models.''' , )
def __init__( self : Dict , __snake_case : str , __snake_case : Dict , __snake_case : List[Any]=False , __snake_case : Any=False , __snake_case : str=dtypes.floataa , __snake_case : Dict=True , __snake_case : Dict=None , ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ : str = random_seed.get_seed(__snake_case )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
UpperCAmelCase_ : Optional[int] = dtypes.as_dtype(__snake_case ).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype )
if fake_data:
UpperCAmelCase_ : Dict = 10_000
UpperCAmelCase_ : Any = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), f'''images.shape: {images.shape} labels.shape: {labels.shape}'''
UpperCAmelCase_ : int = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
UpperCAmelCase_ : Union[str, Any] = images.reshape(
images.shape[0] , images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
UpperCAmelCase_ : Dict = images.astype(numpy.floataa )
UpperCAmelCase_ : Dict = numpy.multiply(__snake_case , 1.0 / 255.0 )
UpperCAmelCase_ : List[Any] = images
UpperCAmelCase_ : Optional[Any] = labels
UpperCAmelCase_ : List[Any] = 0
UpperCAmelCase_ : Dict = 0
@property
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
return self._images
@property
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return self._labels
@property
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return self._num_examples
@property
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
return self._epochs_completed
def _lowerCamelCase ( self : Optional[int] , __snake_case : Tuple , __snake_case : int=False , __snake_case : List[Any]=True ):
'''simple docstring'''
if fake_data:
UpperCAmelCase_ : List[Any] = [1] * 784
UpperCAmelCase_ : Any = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(__snake_case )],
[fake_label for _ in range(__snake_case )],
)
UpperCAmelCase_ : Dict = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
UpperCAmelCase_ : Dict = numpy.arange(self._num_examples )
numpy.random.shuffle(__snake_case )
UpperCAmelCase_ : Dict = self.images[perma]
UpperCAmelCase_ : List[Any] = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
UpperCAmelCase_ : Any = self._num_examples - start
UpperCAmelCase_ : Tuple = self._images[start : self._num_examples]
UpperCAmelCase_ : int = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
UpperCAmelCase_ : Dict = numpy.arange(self._num_examples )
numpy.random.shuffle(__snake_case )
UpperCAmelCase_ : Tuple = self.images[perm]
UpperCAmelCase_ : Optional[int] = self.labels[perm]
# Start next epoch
UpperCAmelCase_ : Dict = 0
UpperCAmelCase_ : str = batch_size - rest_num_examples
UpperCAmelCase_ : Dict = self._index_in_epoch
UpperCAmelCase_ : Dict = self._images[start:end]
UpperCAmelCase_ : Optional[int] = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
UpperCAmelCase_ : List[Any] = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(__lowercase , '''Please write your own downloading logic.''' )
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
if not gfile.Exists(__lowercase ):
gfile.MakeDirs(__lowercase )
UpperCAmelCase_ : Tuple = os.path.join(__lowercase , __lowercase )
if not gfile.Exists(__lowercase ):
urllib.request.urlretrieve(__lowercase , __lowercase ) # noqa: S310
with gfile.GFile(__lowercase ) as f:
UpperCAmelCase_ : str = f.size()
print('''Successfully downloaded''' , __lowercase , __lowercase , '''bytes.''' )
return filepath
@deprecated(
__lowercase , '''Please use alternatives such as:''' ''' tensorflow_datasets.load(\'mnist\')''' )
def snake_case_ ( __lowercase , __lowercase=False , __lowercase=False , __lowercase=dtypes.floataa , __lowercase=True , __lowercase=5_0_0_0 , __lowercase=None , __lowercase=DEFAULT_SOURCE_URL , ):
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=__lowercase , one_hot=__lowercase , dtype=__lowercase , seed=__lowercase )
UpperCAmelCase_ : str = fake()
UpperCAmelCase_ : Optional[int] = fake()
UpperCAmelCase_ : List[str] = fake()
return _Datasets(train=__lowercase , validation=__lowercase , test=__lowercase )
if not source_url: # empty string check
UpperCAmelCase_ : Any = DEFAULT_SOURCE_URL
UpperCAmelCase_ : int = '''train-images-idx3-ubyte.gz'''
UpperCAmelCase_ : Tuple = '''train-labels-idx1-ubyte.gz'''
UpperCAmelCase_ : Any = '''t10k-images-idx3-ubyte.gz'''
UpperCAmelCase_ : List[Any] = '''t10k-labels-idx1-ubyte.gz'''
UpperCAmelCase_ : List[Any] = _maybe_download(
__lowercase , __lowercase , source_url + train_images_file )
with gfile.Open(__lowercase , '''rb''' ) as f:
UpperCAmelCase_ : Union[str, Any] = _extract_images(__lowercase )
UpperCAmelCase_ : List[Any] = _maybe_download(
__lowercase , __lowercase , source_url + train_labels_file )
with gfile.Open(__lowercase , '''rb''' ) as f:
UpperCAmelCase_ : List[Any] = _extract_labels(__lowercase , one_hot=__lowercase )
UpperCAmelCase_ : List[Any] = _maybe_download(
__lowercase , __lowercase , source_url + test_images_file )
with gfile.Open(__lowercase , '''rb''' ) as f:
UpperCAmelCase_ : Optional[int] = _extract_images(__lowercase )
UpperCAmelCase_ : Tuple = _maybe_download(
__lowercase , __lowercase , source_url + test_labels_file )
with gfile.Open(__lowercase , '''rb''' ) as f:
UpperCAmelCase_ : Any = _extract_labels(__lowercase , one_hot=__lowercase )
if not 0 <= validation_size <= len(__lowercase ):
UpperCAmelCase_ : Tuple = (
'''Validation size should be between 0 and '''
F'''{len(__lowercase )}. Received: {validation_size}.'''
)
raise ValueError(__lowercase )
UpperCAmelCase_ : str = train_images[:validation_size]
UpperCAmelCase_ : Dict = train_labels[:validation_size]
UpperCAmelCase_ : int = train_images[validation_size:]
UpperCAmelCase_ : Optional[Any] = train_labels[validation_size:]
UpperCAmelCase_ : Any = {'''dtype''': dtype, '''reshape''': reshape, '''seed''': seed}
UpperCAmelCase_ : Tuple = _DataSet(__lowercase , __lowercase , **__lowercase )
UpperCAmelCase_ : Dict = _DataSet(__lowercase , __lowercase , **__lowercase )
UpperCAmelCase_ : Optional[Any] = _DataSet(__lowercase , __lowercase , **__lowercase )
return _Datasets(train=__lowercase , validation=__lowercase , test=__lowercase )
| 641
|
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def snake_case_ ( __lowercase , __lowercase ):
# Load checkpoint
UpperCAmelCase_ : Tuple = torch.load(__lowercase , map_location='''cpu''' )
UpperCAmelCase_ : Optional[int] = chkpt['''model''']
# We have the base model one level deeper than the original XLM repository
UpperCAmelCase_ : str = {}
for k, v in state_dict.items():
if "pred_layer" in k:
UpperCAmelCase_ : Tuple = v
else:
UpperCAmelCase_ : Union[str, Any] = v
UpperCAmelCase_ : int = chkpt['''params''']
UpperCAmelCase_ : Union[str, Any] = {n: v for n, v in config.items() if not isinstance(__lowercase , (torch.FloatTensor, numpy.ndarray) )}
UpperCAmelCase_ : int = chkpt['''dico_word2id''']
UpperCAmelCase_ : List[Any] = {s + '''</w>''' if s.find('''@@''' ) == -1 and i > 1_3 else s.replace('''@@''' , '''''' ): i for s, i in vocab.items()}
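# Hedged illustration, not part of the original file, of the vocab rewrite above:
# entries without the '@@' continuation marker gain an end-of-word suffix, e.g.
# 'hello' -> 'hello</w>', while continuation pieces drop the marker, e.g. 'hel@@' -> 'hel';
# the first 14 indices (special tokens) are left unchanged.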
# Save pytorch-model
UpperCAmelCase_ : Tuple = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
UpperCAmelCase_ : Tuple = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
UpperCAmelCase_ : Dict = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''vocab_file''']
print(F'''Save PyTorch model to {pytorch_weights_dump_path}''' )
torch.save(__lowercase , __lowercase )
print(F'''Save configuration file to {pytorch_config_dump_path}''' )
with open(__lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(__lowercase , indent=2 ) + '''\n''' )
print(F'''Save vocab file to {pytorch_vocab_dump_path}''' )
with open(__lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(__lowercase , indent=2 ) + '''\n''' )
if __name__ == "__main__":
__UpperCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__UpperCamelCase : Dict = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 641
| 1
|
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Dict = (DPMSolverSinglestepScheduler,)
A_ : Dict = (('num_inference_steps', 2_5),)
def _lowerCamelCase ( self : List[Any] , **__snake_case : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = {
'''num_train_timesteps''': 1_000,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
'''sample_max_value''': 1.0,
'''algorithm_type''': '''dpmsolver++''',
'''solver_type''': '''midpoint''',
'''lambda_min_clipped''': -float('''inf''' ),
'''variance_type''': None,
}
config.update(**__snake_case )
return config
def _lowerCamelCase ( self : Optional[Any] , __snake_case : List[Any]=0 , **__snake_case : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = dict(self.forward_default_kwargs )
UpperCAmelCase_ : int = kwargs.pop('''num_inference_steps''' , __snake_case )
UpperCAmelCase_ : Optional[Any] = self.dummy_sample
UpperCAmelCase_ : str = 0.1 * sample
UpperCAmelCase_ : Dict = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : Dict = self.get_scheduler_config(**__snake_case )
UpperCAmelCase_ : Dict = scheduler_class(**__snake_case )
scheduler.set_timesteps(__snake_case )
# copy over dummy past residuals
UpperCAmelCase_ : Optional[Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__snake_case )
UpperCAmelCase_ : Union[str, Any] = scheduler_class.from_pretrained(__snake_case )
new_scheduler.set_timesteps(__snake_case )
# copy over dummy past residuals
UpperCAmelCase_ : Dict = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = sample, sample
for t in range(__snake_case , time_step + scheduler.config.solver_order + 1 ):
UpperCAmelCase_ : Union[str, Any] = scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
UpperCAmelCase_ : int = new_scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : int , __snake_case : Optional[Any]=0 , **__snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = dict(self.forward_default_kwargs )
UpperCAmelCase_ : Union[str, Any] = kwargs.pop('''num_inference_steps''' , __snake_case )
UpperCAmelCase_ : int = self.dummy_sample
UpperCAmelCase_ : Any = 0.1 * sample
UpperCAmelCase_ : List[str] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : Optional[Any] = self.get_scheduler_config()
UpperCAmelCase_ : Tuple = scheduler_class(**__snake_case )
scheduler.set_timesteps(__snake_case )
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase_ : List[Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__snake_case )
UpperCAmelCase_ : Optional[Any] = scheduler_class.from_pretrained(__snake_case )
# copy over dummy past residuals
new_scheduler.set_timesteps(__snake_case )
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase_ : List[str] = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCAmelCase_ : Dict = scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
UpperCAmelCase_ : Tuple = new_scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _lowerCamelCase ( self : str , __snake_case : Tuple=None , **__snake_case : Optional[int] ):
'''simple docstring'''
if scheduler is None:
UpperCAmelCase_ : Tuple = self.scheduler_classes[0]
UpperCAmelCase_ : Tuple = self.get_scheduler_config(**__snake_case )
UpperCAmelCase_ : List[Any] = scheduler_class(**__snake_case )
UpperCAmelCase_ : Optional[Any] = self.scheduler_classes[0]
UpperCAmelCase_ : Any = self.get_scheduler_config(**__snake_case )
UpperCAmelCase_ : List[Any] = scheduler_class(**__snake_case )
UpperCAmelCase_ : List[Any] = 10
UpperCAmelCase_ : List[Any] = self.dummy_model()
UpperCAmelCase_ : Union[str, Any] = self.dummy_sample_deter
scheduler.set_timesteps(__snake_case )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase_ : List[Any] = model(__snake_case , __snake_case )
UpperCAmelCase_ : Any = scheduler.step(__snake_case , __snake_case , __snake_case ).prev_sample
return sample
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
UpperCAmelCase_ : List[Any] = 50
UpperCAmelCase_ : Optional[int] = self.dummy_model()
UpperCAmelCase_ : Union[str, Any] = self.dummy_sample_deter
scheduler.set_timesteps(__snake_case )
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:] ):
UpperCAmelCase_ : Dict = model(__snake_case , __snake_case )
UpperCAmelCase_ : Optional[int] = scheduler.step(__snake_case , __snake_case , __snake_case ).prev_sample
UpperCAmelCase_ : Union[str, Any] = torch.mean(torch.abs(__snake_case ) )
assert abs(result_mean.item() - 0.2_574 ) < 1E-3
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
for timesteps in [25, 50, 100, 999, 1_000]:
self.check_over_configs(num_train_timesteps=__snake_case )
    def _lowerCamelCase ( self : Union[str, Any] ):
        '''simple docstring'''
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2_791 ) < 1E-3
        scheduler = DEISMultistepScheduler.from_config(scheduler.config )
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config )
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config )
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config )
        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2_791 ) < 1E-3
    def _lowerCamelCase ( self : List[str] ):
        '''simple docstring'''
        self.check_over_configs(thresholding=False )
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , algorithm_type='''dpmsolver++''' , solver_order=order , solver_type=solver_type , )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__snake_case )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=__snake_case , solver_type=__snake_case , prediction_type=__snake_case , algorithm_type=__snake_case , )
UpperCAmelCase_ : List[Any] = self.full_loop(
solver_order=__snake_case , solver_type=__snake_case , prediction_type=__snake_case , algorithm_type=__snake_case , )
assert not torch.isnan(__snake_case ).any(), "Samples have nan numbers"
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
        self.check_over_configs(lower_order_final=True )
        self.check_over_configs(lower_order_final=False )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
self.check_over_configs(lambda_min_clipped=-float('''inf''' ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
        self.check_over_configs(variance_type=None )
        self.check_over_configs(variance_type='''learned_range''' )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
            self.check_over_forward(num_inference_steps=num_inference_steps , time_step=0 )
    def _lowerCamelCase ( self : Union[str, Any] ):
        '''simple docstring'''
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2_791 ) < 1E-3
    def _lowerCamelCase ( self : Union[str, Any] ):
        '''simple docstring'''
        sample = self.full_loop(use_karras_sigmas=True )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2_248 ) < 1E-3
    def _lowerCamelCase ( self : Optional[int] ):
        '''simple docstring'''
        sample = self.full_loop(prediction_type='''v_prediction''' )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.1_453 ) < 1E-3
    def _lowerCamelCase ( self : Dict ):
        '''simple docstring'''
        sample = self.full_loop(prediction_type='''v_prediction''' , use_karras_sigmas=True )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.0_649 ) < 1E-3
    def _lowerCamelCase ( self : str ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True , dynamic_thresholding_ratio=0 )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        assert sample.dtype == torch.float16
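# --- Hedged usage sketch (editorial addition, not part of the test suite above). ---
# It shows how the scheduler exercised by these tests is typically driven outside a
# test harness; the zero "residual" is a stand-in for a trained UNet's noise prediction.
if __name__ == "__main__":
    import torch
    from diffusers import DPMSolverSinglestepScheduler

    demo_scheduler = DPMSolverSinglestepScheduler(num_train_timesteps=1_000 )
    demo_scheduler.set_timesteps(10 )
    demo_sample = torch.randn(1 , 3 , 8 , 8 )
    for demo_t in demo_scheduler.timesteps:
        demo_residual = torch.zeros_like(demo_sample )  # placeholder model output
        demo_sample = demo_scheduler.step(demo_residual , demo_t , demo_sample ).prev_sample
    print(demo_sample.shape )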
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
    TaTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Optional[int] = VOCAB_FILES_NAMES
A_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
A_ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ : str = ['input_ids', 'attention_mask']
A_ : int = TaTokenizer
    prefix_tokens: List[int] = []
    def __init__( self : Union[str, Any] , vocab_file : Tuple=None , tokenizer_file : List[Any]=None , eos_token : int="</s>" , unk_token : List[Any]="<unk>" , pad_token : Dict="<pad>" , extra_ids : Tuple=100 , additional_special_tokens : int=None , **kwargs : Any , ):
        '''simple docstring'''
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f'''<extra_id_{i}>''' for i in range(extra_ids )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool('''extra_id_''' in str(x ) ) , additional_special_tokens ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
                    ''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
                    ''' tokens''' )
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , extra_ids=extra_ids , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self._extra_ids = extra_ids
@staticmethod
    def _lowerCamelCase ( pretrained_model_name_or_path : Dict , max_model_length : List[str] , init_max_model_length : Tuple ):
        '''simple docstring'''
        if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f''' {pretrained_model_name_or_path} automatically truncating your input to'''
f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , __snake_case , )
return max_model_length
    def _lowerCamelCase ( self : Dict , save_directory : str , filename_prefix : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
        if not os.path.isdir(save_directory ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
        out_vocab_file = os.path.join(
__snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
logger.info(f'''Copy vocab file to {out_vocab_file}''' )
return (out_vocab_file,)
    def _lowerCamelCase ( self : List[str] , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ):
        '''simple docstring'''
        token_ids_a = token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return self.prefix_tokens + token_ids_a
        else:
            token_ids_b = token_ids_b + [self.eos_token_id]
            return self.prefix_tokens + token_ids_a + token_ids_b
    def _lowerCamelCase ( self : Dict , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ):
        '''simple docstring'''
        eos = [self.eos_token_id]
        if token_ids_b is None:
            return len(token_ids_a + eos ) * [0]
        return len(token_ids_a + eos + token_ids_b + eos ) * [0]
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
        return list(
            set(filter(lambda token: bool(re.search(R'''<extra_id_\d+>''' , token ) ) is not None , self.additional_special_tokens ) ) )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
        return [self.convert_tokens_to_ids(token ) for token in self.get_sentinel_tokens()]
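# --- Hedged usage sketch (editorial addition; assumes `transformers` and Hub access). ---
# It exercises the EOS and sentinel-token helpers defined above (their upstream names
# are `build_inputs_with_special_tokens` and `get_sentinel_tokens`):
#
#   from transformers import T5TokenizerFast
#   tok = T5TokenizerFast.from_pretrained("t5-small")
#   ids = tok("Studies have shown that owning a dog is good for you").input_ids
#   assert ids[-1] == tok.eos_token_id            # an EOS is appended automatically
#   print(sorted(tok.get_sentinel_tokens())[:2])  # e.g. ['<extra_id_0>', '<extra_id_1>']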
import numpy as np
from transformers import Pipeline
def softmax(outputs ):
    # numerically stable softmax: subtract the per-row max before exponentiating
    maxes = np.max(outputs , axis=-1 , keepdims=True )
    shifted_exp = np.exp(outputs - maxes )
    return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=True )
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
def _lowerCamelCase ( self : Dict , **__snake_case : List[Any] ):
'''simple docstring'''
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs['''second_text'''] = kwargs['''second_text''']
        return preprocess_kwargs, {}, {}
    def _lowerCamelCase ( self : Dict , text : Union[str, Any] , second_text : Tuple=None ):
        '''simple docstring'''
        return self.tokenizer(text , text_pair=second_text , return_tensors=self.framework )
def _lowerCamelCase ( self : Union[str, Any] , __snake_case : Optional[Any] ):
'''simple docstring'''
return self.model(**__snake_case )
def _lowerCamelCase ( self : Union[str, Any] , __snake_case : Optional[int] ):
'''simple docstring'''
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits )
        best_class = np.argmax(probabilities )
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
return {"label": label, "score": score, "logits": logits}
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_x_clip': [
'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XCLIPConfig',
'XCLIPTextConfig',
'XCLIPVisionConfig',
],
'processing_x_clip': ['XCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_x_clip'] = [
'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'XCLIPModel',
'XCLIPPreTrainedModel',
'XCLIPTextModel',
'XCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
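# Hedged usage note (editorial addition): with the `_LazyModule` indirection above, a
# caller imports the public names eagerly while the heavy torch modules load only on
# first attribute access, e.g.:
#
#   from transformers import XCLIPModel, XCLIPProcessor  # resolved lazily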
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
    def __init__( self : Union[str, Any] , path_or_paths : NestedDataStructureLike[PathLike] , split : Optional[NamedSplit] = None , features : Optional[Features] = None , cache_dir : str = None , keep_in_memory : bool = False , streaming : bool = False , num_proc : Optional[int] = None , **kwargs : str , ):
        '''simple docstring'''
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        path_or_paths = path_or_paths if isinstance(path_or_paths , dict ) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir , data_files=path_or_paths , features=features , **kwargs , )
    def _lowerCamelCase ( self : Optional[int] ):
        '''simple docstring'''
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
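# --- Hedged usage sketch (editorial addition; assumes the `datasets` package). ---
# The reader above backs the public "text" loader, which yields one example per line:
#
#   from datasets import load_dataset
#   ds = load_dataset("text", data_files={"train": "corpus.txt"}, split="train")
#   print(ds[0])  # {'text': '<first line of corpus.txt>'}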
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : str = ['image_processor', 'tokenizer']
A_ : int = 'LayoutLMv2ImageProcessor'
A_ : str = ('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast')
    def __init__( self : Union[str, Any] , image_processor : List[Any]=None , tokenizer : List[str]=None , **kwargs : Optional[int] ):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )
        image_processor = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )
def __call__( self : List[str] , __snake_case : Dict , __snake_case : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __snake_case : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , __snake_case : Union[List[List[int]], List[List[List[int]]]] = None , __snake_case : Optional[Union[List[int], List[List[int]]]] = None , __snake_case : bool = True , __snake_case : Union[bool, str, PaddingStrategy] = False , __snake_case : Union[bool, str, TruncationStrategy] = None , __snake_case : Optional[int] = None , __snake_case : int = 0 , __snake_case : Optional[int] = None , __snake_case : Optional[bool] = None , __snake_case : Optional[bool] = None , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = True , __snake_case : Optional[Union[str, TensorType]] = None , **__snake_case : Optional[int] , ):
'''simple docstring'''
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes '''
'''if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
# first, apply the image processor
UpperCAmelCase_ : Tuple = self.image_processor(images=__snake_case , return_tensors=__snake_case )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(__snake_case , __snake_case ):
UpperCAmelCase_ : Optional[Any] = [text] # add batch dimension (as the image processor always adds a batch dimension)
UpperCAmelCase_ : Any = features['''words''']
UpperCAmelCase_ : str = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=__snake_case , add_special_tokens=__snake_case , padding=__snake_case , truncation=__snake_case , max_length=__snake_case , stride=__snake_case , pad_to_multiple_of=__snake_case , return_token_type_ids=__snake_case , return_attention_mask=__snake_case , return_overflowing_tokens=__snake_case , return_special_tokens_mask=__snake_case , return_offsets_mapping=__snake_case , return_length=__snake_case , verbose=__snake_case , return_tensors=__snake_case , **__snake_case , )
# add pixel values
UpperCAmelCase_ : List[str] = features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
UpperCAmelCase_ : Optional[int] = self.get_overflowing_images(__snake_case , encoded_inputs['''overflow_to_sample_mapping'''] )
UpperCAmelCase_ : List[Any] = images
return encoded_inputs
    def _lowerCamelCase ( self : Union[str, Any] , images : str , overflow_to_sample_mapping : List[Any] ):
        '''simple docstring'''
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(images_with_overflow ) != len(overflow_to_sample_mapping ):
            raise ValueError(
                '''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
                f''' {len(images_with_overflow )} and {len(overflow_to_sample_mapping )}''' )
return images_with_overflow
def _lowerCamelCase ( self : List[Any] , *__snake_case : Optional[int] , **__snake_case : List[Any] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*__snake_case , **__snake_case )
def _lowerCamelCase ( self : str , *__snake_case : Optional[Any] , **__snake_case : Union[str, Any] ):
'''simple docstring'''
return self.tokenizer.decode(*__snake_case , **__snake_case )
@property
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
return self.image_processor_class
@property
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , FutureWarning , )
return self.image_processor
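# --- Hedged usage sketch (editorial addition; assumes `transformers`, pytesseract for
# OCR, and Hub access). The processor pairs the OCR-capable image processor with the
# LayoutXLM tokenizer as wired up above; `image` is assumed to be a PIL.Image.
#
#   from transformers import LayoutXLMProcessor
#   processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#   encoding = processor(images=image, return_tensors="pt")
#   print(encoding.keys())  # input_ids, bbox, attention_mask, image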
from math import ceil
def solution(n = 1_001 ):
    # Sum of the numbers on both diagonals of an n x n clockwise number spiral
    # (Project Euler problem 28). Ring i has side length 2*i + 1 and its four
    # corners sum to 4*(2*i + 1)**2 - 12*i, i.e. 4 * odd**2 - 6 * even below.
    # Example: solution(5) == 101 (1 + 3 + 5 + 7 + 9 + 13 + 17 + 21 + 25).
    total = 1
    for i in range(1 , int(ceil(n / 2.0 ) ) ):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number')
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class lowerCAmelCase__:
'''simple docstring'''
A_ : torch.Tensor # [batch_size x 3]
A_ : torch.Tensor # [batch_size x 3]
A_ : torch.Tensor # [batch_size x 3]
A_ : torch.Tensor # [batch_size x 3]
A_ : int
A_ : int
A_ : float
A_ : float
A_ : Tuple[int]
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
        return torch.from_numpy(np.array([self.width, self.height] , dtype=np.float32 ) )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
        return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.float32 ) )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
        pixel_indices = torch.arange(self.height * self.width )
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices , self.width , rounding_mode='''trunc''' ),
            ] , axis=1 , )
        return coords
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
        batch_size , *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape ) )
        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
        rays = self.get_camera_rays(coords )
        rays = rays.view(batch_size , inner_batch_size * self.height * self.width , 2 , 3 )
        return rays
def _lowerCamelCase ( self : Dict , __snake_case : torch.Tensor ):
'''simple docstring'''
        batch_size , *shape , n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]
        flat = coords.view(batch_size , -1 , 2 )
        res = self.resolution()
        fov = self.fov()
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2 )
        fracs = fracs.view(batch_size , -1 , 2 )
        directions = (
            self.z.view(batch_size , 1 , 3 )
            + self.x.view(batch_size , 1 , 3 ) * fracs[:, :, :1]
            + self.y.view(batch_size , 1 , 3 ) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1 , keepdim=True )
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
                directions,
            ] , dim=2 , )
        return rays.view(batch_size , *shape , 2 , 3 )
    def _lowerCamelCase ( self : Any , width : int , height : int ):
        '''simple docstring'''
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin , x=self.x , y=self.y , z=self.z , width=width , height=height , x_fov=self.x_fov , y_fov=self.y_fov , )
def create_pan_cameras(size ):
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0 , 2 * np.pi , num=2_0 ):
        z = np.array([np.sin(theta ), np.cos(theta ), -0.5] )
        z /= np.sqrt(np.sum(z**2 ) )
        origin = -z * 4
        x = np.array([np.cos(theta ), -np.sin(theta ), 0.0] )
        y = np.cross(z , x )
        origins.append(origin )
        xs.append(x )
        ys.append(y )
        zs.append(z )
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins , axis=0 ) ).float() , x=torch.from_numpy(np.stack(xs , axis=0 ) ).float() , y=torch.from_numpy(np.stack(ys , axis=0 ) ).float() , z=torch.from_numpy(np.stack(zs , axis=0 ) ).float() , width=size , height=size , x_fov=0.7 , y_fov=0.7 , shape=(1, len(xs )) , )
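# --- Hedged usage sketch (editorial addition). `create_pan_cameras` above builds a
# 20-view pan rig; `camera_rays` is the property on the dataclass above (its name is
# mangled to `_lowerCamelCase` in this listing).
#
#   cameras = create_pan_cameras(64)
#   rays = cameras.camera_rays            # shape: [1, 20 * 64 * 64, 2, 3]
#   origins, directions = rays[..., 0, :], rays[..., 1, :]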
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'processing_layoutxlm': ['LayoutXLMProcessor']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_layoutxlm'] = ['LayoutXLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_layoutxlm_fast'] = ['LayoutXLMTokenizerFast']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
logger = logging.getLogger(__name__)
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Optional[int] = 'token-classification'
    def __init__( self : Any , hparams : Optional[Any] ):
        '''simple docstring'''
        if type(hparams ) == dict:
            hparams = Namespace(**hparams )
        module = import_module('''tasks''' )
        try:
            token_classification_task_clazz = getattr(module , hparams.task_type )
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f'''Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
                f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
        self.labels = self.token_classification_task.get_labels(hparams.labels )
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams , len(self.labels ) , self.mode )
def _lowerCamelCase ( self : Optional[int] , **__snake_case : Optional[Any] ):
'''simple docstring'''
return self.model(**__snake_case )
def _lowerCamelCase ( self : str , __snake_case : Union[str, Any] , __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : int = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
UpperCAmelCase_ : Union[str, Any] = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
) # XLM and RoBERTa don"t use token_type_ids
UpperCAmelCase_ : str = self(**__snake_case )
UpperCAmelCase_ : Any = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = self.hparams
for mode in ["train", "dev", "test"]:
UpperCAmelCase_ : Optional[Any] = self._feature_file(__snake_case )
if os.path.exists(__snake_case ) and not args.overwrite_cache:
logger.info('''Loading features from cached file %s''' , __snake_case )
UpperCAmelCase_ : Any = torch.load(__snake_case )
else:
logger.info('''Creating features from dataset file at %s''' , args.data_dir )
UpperCAmelCase_ : Optional[Any] = self.token_classification_task.read_examples_from_file(args.data_dir , __snake_case )
UpperCAmelCase_ : List[str] = self.token_classification_task.convert_examples_to_features(
__snake_case , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ['''xlnet'''] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ['''xlnet'''] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=__snake_case , pad_on_left=bool(self.config.model_type in ['''xlnet'''] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info('''Saving features into cached file %s''' , __snake_case )
torch.save(__snake_case , __snake_case )
def _lowerCamelCase ( self : Any , __snake_case : int , __snake_case : int , __snake_case : bool = False ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = self._feature_file(__snake_case )
logger.info('''Loading features from cached file %s''' , __snake_case )
UpperCAmelCase_ : Optional[int] = torch.load(__snake_case )
UpperCAmelCase_ : Any = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
UpperCAmelCase_ : int = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
UpperCAmelCase_ : Any = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
UpperCAmelCase_ : Optional[int] = torch.tensor([0 for f in features] , dtype=torch.long )
            # HACK: this will be removed soon
UpperCAmelCase_ : int = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(__snake_case , __snake_case , __snake_case , __snake_case ) , batch_size=__snake_case )
def _lowerCamelCase ( self : List[Any] , __snake_case : Dict , __snake_case : Union[str, Any] ):
'''simple docstring'''
"""Compute validation""" ""
UpperCAmelCase_ : str = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
UpperCAmelCase_ : Any = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
) # XLM and RoBERTa don"t use token_type_ids
UpperCAmelCase_ : int = self(**__snake_case )
UpperCAmelCase_ , UpperCAmelCase_ : int = outputs[:2]
UpperCAmelCase_ : Optional[int] = logits.detach().cpu().numpy()
UpperCAmelCase_ : List[Any] = inputs['''labels'''].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _lowerCamelCase ( self : List[str] , __snake_case : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = torch.stack([x['''val_loss'''] for x in outputs] ).mean()
UpperCAmelCase_ : Dict = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )
UpperCAmelCase_ : Any = np.argmax(__snake_case , axis=2 )
UpperCAmelCase_ : int = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
UpperCAmelCase_ : Tuple = dict(enumerate(self.labels ) )
UpperCAmelCase_ : Union[str, Any] = [[] for _ in range(out_label_ids.shape[0] )]
UpperCAmelCase_ : Optional[int] = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
UpperCAmelCase_ : Union[str, Any] = {
'''val_loss''': val_loss_mean,
'''accuracy_score''': accuracy_score(__snake_case , __snake_case ),
'''precision''': precision_score(__snake_case , __snake_case ),
'''recall''': recall_score(__snake_case , __snake_case ),
'''f1''': fa_score(__snake_case , __snake_case ),
}
UpperCAmelCase_ : str = dict(results.items() )
UpperCAmelCase_ : List[Any] = results
return ret, preds_list, out_label_list
def _lowerCamelCase ( self : List[str] , __snake_case : int ):
'''simple docstring'''
# when stable
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = self._eval_end(__snake_case )
UpperCAmelCase_ : int = ret['''log''']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _lowerCamelCase ( self : List[Any] , __snake_case : Tuple ):
'''simple docstring'''
# updating to test_epoch_end instead of deprecated test_end
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self._eval_end(__snake_case )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
UpperCAmelCase_ : Optional[Any] = ret['''log''']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _lowerCamelCase ( __snake_case : str , __snake_case : Optional[int] ):
'''simple docstring'''
# Add NER specific options
BaseTransformer.add_model_specific_args(__snake_case , __snake_case )
parser.add_argument(
'''--task_type''' , default='''NER''' , type=__snake_case , help='''Task type to fine tune in training (e.g. NER, POS, etc)''' )
parser.add_argument(
'''--max_seq_length''' , default=128 , type=__snake_case , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--labels''' , default='''''' , type=__snake_case , help='''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.''' , )
parser.add_argument(
'''--gpus''' , default=0 , type=__snake_case , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
return parser
if __name__ == "__main__":
__UpperCamelCase : str = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
__UpperCamelCase : Dict = NERTransformer.add_model_specific_args(parser, os.getcwd())
__UpperCamelCase : Tuple = parser.parse_args()
__UpperCamelCase : Optional[Any] = NERTransformer(args)
__UpperCamelCase : int = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
__UpperCamelCase : Optional[int] = sorted(glob.glob(os.path.join(args.output_dir, 'checkpoint-epoch=*.ckpt'), recursive=True))
__UpperCamelCase : List[Any] = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example ):
    # tokenize one example and record the character-to-token compression ratio
    output = {}
    output['''input_ids'''] = tokenizer(example['''content'''] , truncation=False )['''input_ids''']
    output['''ratio_char_token'''] = len(example['''content'''] ) / len(output['''input_ids'''] )
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
t_start = time.time()
ds = load_dataset(args.dataset_name, split='train')
print(F'Dataset loaded in {time.time()-t_start:.2f}s')
t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'repo_name',
'path',
'copies',
'size',
'content',
'license',
'hash',
'line_mean',
'line_max',
'alpha_frac',
'autogenerated',
],
)
print(F'Dataset tokenized in {time.time()-t_start:.2f}s')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'Data pushed to the hub in {time.time()-t_start:.2f}s')
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Union[str, Any] = 'encoder-decoder'
A_ : Optional[int] = True
    def __init__( self : Dict , **kwargs : Union[str, Any] ):
        '''simple docstring'''
        super().__init__(**kwargs )
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop('''encoder''' )
        encoder_model_type = encoder_config.pop('''model_type''' )
        decoder_config = kwargs.pop('''decoder''' )
        decoder_model_type = decoder_config.pop('''model_type''' )
        from ..auto.configuration_auto import AutoConfig
        self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config )
        self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.is_encoder_decoder = True
@classmethod
    def _lowerCamelCase ( cls : Optional[Any] , encoder_config : PretrainedConfig , decoder_config : PretrainedConfig , **kwargs : Union[str, Any] ):
        '''simple docstring'''
        logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **kwargs )
    def _lowerCamelCase ( self : Dict ):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output['''encoder'''] = self.encoder.to_dict()
        output['''decoder'''] = self.decoder.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
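# --- Hedged usage sketch (editorial addition; assumes `transformers`). ---
# Composing a config from two sub-model configs via the classmethod above
# (public name: `EncoderDecoderConfig.from_encoder_decoder_configs`):
#
#   from transformers import BertConfig, EncoderDecoderConfig
#   enc, dec = BertConfig(), BertConfig()
#   cfg = EncoderDecoderConfig.from_encoder_decoder_configs(enc, dec)
#   assert cfg.decoder.is_decoder and cfg.decoder.add_cross_attention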
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict ):
    ignore_keys = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb(emb ):
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict , expert_idx=None ):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace('''moe_layer.experts.0''' , F'''ffn.experts.expert_{expert_idx}''' )
            else:
                key = key.replace('''moe_layer.experts.''' , '''ffn.experts.expert_''' )
        if "gate" in key:
            key = key.replace('''.moe_layer.gate.wg''' , '''.ffn.router.classifier''' )
        # note: the membership tests below need `in key`; a bare string literal is always truthy
        if "fc2" in key and "experts" not in key:
            key = key.replace('''.fc2.''' , '''.ffn.fc2.''' )
        if "fc1" in key and "experts" not in key:
            key = key.replace('''.fc1.''' , '''.ffn.fc1.''' )
        if ".encoder_attn." in key:
            key = key.replace('''.encoder_attn.''' , '''.cross_attention.''' )
        if "encoder_attn_layer_norm" in key:
            key = key.replace('''encoder_attn_layer_norm''' , '''cross_attention_layer_norm''' )
        if "final_layer_norm" in key:
            key = key.replace('''final_layer_norm''' , '''ff_layer_norm''' )
        new_dict[key] = state_dict[old_key]
    return new_dict
def shard_on_the_fly(switch_checkpoint_path , dump_path , num_experts , dtype , weights_name = WEIGHTS_NAME ):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path , exist_ok=True )
    for expert in range(num_experts ):
        expert_path = switch_checkpoint_path + F'''-rank-{expert}.pt'''
        if os.path.isfile(expert_path ):
            expert_state = torch.load(expert_path )['''model''']
            remove_ignore_keys_(expert_state )
            expert_state = rename_fairseq_keys(expert_state , expert )
            save_path = os.path.join(
                dump_path , weights_name.replace('''.bin''' , F'''-{len(sharded_state_dicts )+1:05d}-of-???.bin''' ) )
            torch.save(expert_state , save_path )
            sharded_state_dicts.append(expert_state.keys() )
            total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
                expert_state[list(expert_state )[0]].dtype )
    # Add the last block
    save_path = os.path.join(dump_path , weights_name.replace('''.bin''' , F'''-{len(sharded_state_dicts )+1:05d}-of-???.bin''' ) )
    shared_weights = torch.load(switch_checkpoint_path + '''-shared.pt''' )['''model''']
    remove_ignore_keys_(shared_weights )
    shared_weights = rename_fairseq_keys(shared_weights , None )
    shared_weights['''shared.weight'''] = shared_weights['''decoder.embed_tokens.weight''']
    sharded_state_dicts.append(shared_weights.keys() )
    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts ) == 1:
        save_path = os.path.join(dump_path , weights_name )
        torch.save(shared_weights , save_path )
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights , save_path )
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts ):
        shard_file = weights_name.replace('''.bin''' , F'''-{idx+1:05d}-of-{len(sharded_state_dicts ):05d}.bin''' )
        temp_filename = os.path.join(dump_path , weights_name.replace('''.bin''' , F'''-{idx+1:05d}-of-???.bin''' ) )
        os.rename(temp_filename , os.path.join(dump_path , shard_file ) )
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {'''total_size''': total_size}
    index = {'''metadata''': metadata, '''weight_map''': weight_map}
    with open(os.path.join(dump_path , WEIGHTS_INDEX_NAME ) , '''w''' , encoding='''utf-8''' ) as f:
        content = json.dumps(index , indent=2 , sort_keys=True ) + '''\n'''
        f.write(content )
    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--nllb_moe_checkpoint_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--dtype', default='float32', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b',
type=str,
required=False,
help='Path to the output pytorch model.',
)
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
    config = NllbMoeConfig.from_pretrained(
'facebook/nllb-200-3.3B', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('Done')
model.save_pretrained(args.pytorch_dump_folder_path)
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = '''https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'''
    image = Image.open(requests.get(url , stream=True ).raw ).convert('''RGB''' )
    return image
def create_rename_keys(config ):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def read_in_q_v_bias(state_dict , config ):
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        q_bias = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' )
        v_bias = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' )
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias , requires_grad=False ), v_bias) )
        state_dict[F'''visual_encoder.blocks.{i}.attn.qkv.bias'''] = qkv_bias
def get_blipa_config(model_name , eos_token_id=None ):
    image_size = 3_6_4 if '''coco''' in model_name else 2_2_4
    vision_config = BlipaVisionConfig(image_size=image_size ).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained('''facebook/opt-2.7b''' , eos_token_id=eos_token_id ).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained('''facebook/opt-6.7b''' , eos_token_id=eos_token_id ).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
    config = BlipaConfig(vision_config=vision_config , text_config=text_config )
    return config, image_size
@torch.no_grad()
def snake_case_ ( __lowercase , __lowercase=None , __lowercase=False ):
UpperCAmelCase_ : List[Any] = (
AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' )
if '''opt''' in model_name
else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' )
)
UpperCAmelCase_ : List[str] = tokenizer('''\n''' , add_special_tokens=__lowercase ).input_ids[0]
UpperCAmelCase_ , UpperCAmelCase_ : str = get_blipa_config(__lowercase , eos_token_id=__lowercase )
UpperCAmelCase_ : List[Any] = BlipaForConditionalGeneration(__lowercase ).eval()
UpperCAmelCase_ : Tuple = {
'''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''),
'''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''),
'''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''),
'''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''),
'''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''),
'''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''),
'''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''),
}
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = model_name_to_original[model_name]
# load original model
print('''Loading original model...''' )
UpperCAmelCase_ : int = '''cuda''' if torch.cuda.is_available() else '''cpu'''
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = load_model_and_preprocess(
name=__lowercase , model_type=__lowercase , is_eval=__lowercase , device=__lowercase )
original_model.eval()
print('''Done!''' )
# update state dict keys
UpperCAmelCase_ : Optional[Any] = original_model.state_dict()
UpperCAmelCase_ : List[Any] = create_rename_keys(__lowercase )
for src, dest in rename_keys:
rename_key(__lowercase , __lowercase , __lowercase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
UpperCAmelCase_ : Union[str, Any] = state_dict.pop(__lowercase )
if key.startswith('''Qformer.bert''' ):
UpperCAmelCase_ : Tuple = key.replace('''Qformer.bert''' , '''qformer''' )
if "attention.self" in key:
UpperCAmelCase_ : Optional[Any] = key.replace('''self''' , '''attention''' )
if "opt_proj" in key:
UpperCAmelCase_ : Any = key.replace('''opt_proj''' , '''language_projection''' )
if "t5_proj" in key:
UpperCAmelCase_ : Tuple = key.replace('''t5_proj''' , '''language_projection''' )
if key.startswith('''opt''' ):
UpperCAmelCase_ : Any = key.replace('''opt''' , '''language''' )
if key.startswith('''t5''' ):
UpperCAmelCase_ : Optional[Any] = key.replace('''t5''' , '''language''' )
UpperCAmelCase_ : List[str] = val
# read in qv biases
read_in_q_v_bias(__lowercase , __lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = hf_model.load_state_dict(__lowercase , strict=__lowercase )
assert len(__lowercase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
UpperCAmelCase_ : str = load_demo_image()
UpperCAmelCase_ : Any = vis_processors['''eval'''](__lowercase ).unsqueeze(0 ).to(__lowercase )
UpperCAmelCase_ : Optional[Any] = tokenizer(['''\n'''] , return_tensors='''pt''' ).input_ids.to(__lowercase )
# create processor
UpperCAmelCase_ : Optional[int] = BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size} , image_mean=__lowercase , image_std=__lowercase )
UpperCAmelCase_ : Tuple = BlipaProcessor(image_processor=__lowercase , tokenizer=__lowercase )
UpperCAmelCase_ : str = processor(images=__lowercase , return_tensors='''pt''' ).pixel_values.to(__lowercase )
# make sure processor creates exact same pixel values
assert torch.allclose(__lowercase , __lowercase )
original_model.to(__lowercase )
hf_model.to(__lowercase )
with torch.no_grad():
if "opt" in model_name:
UpperCAmelCase_ : Tuple = original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits
UpperCAmelCase_ : Optional[int] = hf_model(__lowercase , __lowercase ).logits
else:
UpperCAmelCase_ : int = original_model(
{'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits
UpperCAmelCase_ : Optional[int] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_0_0 )
UpperCAmelCase_ : int = hf_model(__lowercase , __lowercase , labels=__lowercase ).logits
assert original_logits.shape == logits.shape
print('''First values of original logits:''' , original_logits[0, :3, :3] )
print('''First values of HF logits:''' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
UpperCAmelCase_ : Tuple = torch.tensor(
[[-4_1.5_8_5_0, -4.4_4_4_0, -8.9_9_2_2], [-4_7.4_3_2_2, -5.9_1_4_3, -1.7_3_4_0]] , device=__lowercase )
assert torch.allclose(logits[0, :3, :3] , __lowercase , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
UpperCAmelCase_ : Tuple = torch.tensor(
[[-5_7.0_1_0_9, -9.8_9_6_7, -1_2.6_2_8_0], [-6_8.6_5_7_8, -1_2.7_1_9_1, -1_0.5_0_6_5]] , device=__lowercase )
else:
# cast to same type
UpperCAmelCase_ : Optional[int] = logits.dtype
assert torch.allclose(original_logits.to(__lowercase ) , __lowercase , atol=1e-2 )
print('''Looks ok!''' )
print('''Generating a caption...''' )
UpperCAmelCase_ : Union[str, Any] = ''''''
UpperCAmelCase_ : Optional[Any] = tokenizer(__lowercase , return_tensors='''pt''' ).input_ids.to(__lowercase )
UpperCAmelCase_ : int = original_model.generate({'''image''': original_pixel_values} )
UpperCAmelCase_ : Optional[int] = hf_model.generate(
__lowercase , __lowercase , do_sample=__lowercase , num_beams=5 , max_length=3_0 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('''Original generation:''' , __lowercase )
UpperCAmelCase_ : Tuple = input_ids.shape[1]
UpperCAmelCase_ : str = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=__lowercase )
UpperCAmelCase_ : Optional[int] = [text.strip() for text in output_text]
print('''HF generation:''' , __lowercase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(__lowercase )
hf_model.save_pretrained(__lowercase )
if push_to_hub:
processor.push_to_hub(F'''nielsr/{model_name}''' )
hf_model.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
'blip2-opt-2.7b',
'blip2-opt-6.7b',
'blip2-opt-2.7b-coco',
'blip2-opt-6.7b-coco',
'blip2-flan-t5-xl',
'blip2-flan-t5-xl-coco',
'blip2-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='blip2-opt-2.7b',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
    args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'nielsr/canine-s': 2048,
}
# Unicode defines 1,114,112 total “codepoints”
__UpperCamelCase : Optional[int] = 111_4112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
__UpperCamelCase : List[str] = 0
__UpperCamelCase : str = 0xe000
__UpperCamelCase : Union[str, Any] = 0xe001
__UpperCamelCase : Any = 0xe002
__UpperCamelCase : Dict = 0xe003
__UpperCamelCase : Tuple = 0xe004
# Maps special codepoints to human-readable names.
__UpperCamelCase : Dict[int, str] = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}


class CanineTokenizer(PreTrainedTokenizer):
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS), eos_token=chr(SEP), sep_token=chr(SEP), cls_token=chr(CLS),
        pad_token=chr(PAD), mask_token=chr(MASK), add_prefix_space=False, model_max_length=2_048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token,
            pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space,
            model_max_length=model_max_length, **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)
    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        # CANINE has no vocabulary file to save.
        return ()
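# A minimal usage sketch of the tokenizer above (names as reconstructed here;
# tokenization is simply per-character conversion into Unicode codepoints):
#
#   tokenizer = CanineTokenizer()
#   tokenizer("hi")["input_ids"]
#   # -> [0xE000, ord("h"), ord("i"), 0xE001], i.e. [CLS] h i [SEP] as raw codepoints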
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness(check_program, timeout, task_id, completion_id):
    """Evaluates the functional correctness of a completion by running the test
    suite provided in the problem, in a separate process with a hard timeout."""
    manager = multiprocessing.Manager()
    result = manager.list()

    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()

    if not result:
        result.append('timed out')

    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }
def unsafe_execute(check_program, result, timeout):
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir

        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()

        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append('passed')
        except TimeoutException:
            result.append('timed out')
        except BaseException as e:
            result.append(f'failed: {e}')

        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException('Timed out!')

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)


@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield


@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname
class TimeoutException(Exception):
    pass


class WriteOnlyStringIO(io.StringIO):
    """StringIO that throws an exception when it's read from."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        """Returns True if the IO object can be read."""
        return False


class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = "stdin"


@contextlib.contextmanager
def chdir(root):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)
def reliability_guard(maximum_memory_bytes=None):
    """Disables various destructive functions and prevents the generated code
    from interfering with the test (e.g. fork bomb, killing other processes,
    removing filesystem files, etc.). This is not a security sandbox."""
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = '1'

    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore

    __builtins__["help"] = None

    import sys

    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
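# A minimal usage sketch for the execution harness above (the program string is
# hypothetical, not taken from this file):
#
#   program = "def add(a, b):\n    return a + b\n\nassert add(1, 2) == 3\n"
#   check_correctness(program, timeout=3.0, task_id="demo/0", completion_id=0)
#   # -> {'task_id': 'demo/0', 'passed': True, 'result': 'passed', 'completion_id': 0}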
from collections.abc import Callable
class Heap:
    """A generic Heap class, usable as a min- or max-heap by passing a suitable key function."""

    def __init__(self, key: Callable | None = None) -> None:
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        """Returns parent index of given index if it exists, else None."""
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        """Returns left-child index of given index if it exists, else None."""
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        """Returns right-child index of given index if it exists, else None."""
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        """Compares the two items using the default '<' operator."""
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        """Returns the index that should be the parent, considering the given
        index and both its children."""
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index: int) -> None:
        """Fixes the heap in the upward direction from the given index."""
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        """Fixes the heap in the downward direction from the given index."""
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item: int, item_value: int) -> None:
        """Updates the value of the given item, if it is present in the heap."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item: int) -> None:
        """Deletes the given item from the heap, if it is present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item: int, item_value: int) -> None:
        """Inserts the given item with the given value into the heap."""
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self):
        """Returns the top item tuple ([item, item_value]) from the heap, if present."""
        return self.arr[0] if self.size else None

    def extract_top(self):
        """Returns the top item tuple ([item, item_value]) from the heap and
        removes it as well, if present."""
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple


def test_heap() -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
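# A minimal usage sketch of the Heap class above (illustrative values):
#
#   h = Heap()                      # min-heap by default
#   for v in (5, 1, 7):
#       h.insert_item(v, v)
#   h.get_top()                     # -> [1, 1]
#   max_h = Heap(key=lambda x: -x)  # a negating key yields max-heap behaviour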
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class FalconConfig(PretrainedConfig):
    model_type = 'falcon'
    keys_to_ignore_at_inference = ['past_key_values']

    def __init__(
        self,
        vocab_size=65_024,
        hidden_size=4_544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop('n_embed', None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
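# A minimal usage sketch (illustrative values):
#
#   config = FalconConfig(new_decoder_architecture=True, num_kv_heads=8)
#   config.head_dim  # hidden_size // num_attention_heads = 4544 // 71 = 64
#   config.rotary    # True whenever ALiBi is disabled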
import numpy as np
def runge_kutta(f, y0, x0, h, x_end):
    """Solves y' = f(x, y) on [x0, x_end] with step h using the classical
    fourth-order Runge-Kutta method; returns the array of y values."""
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h

    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
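# Worked example: y' = y with y(0) = 1 on [0, 1]; the final value approximates e.
#
#   y = runge_kutta(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
#   y[-1]  # ~2.71828, vs. the exact value e = 2.718281828...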
def reverse_long_words(sentence: str) -> str:
    """Reverses every word in the sentence that is longer than four characters."""
    return " ".join("".join(word[::-1]) if len(word) > 4 else word for word in sentence.split())
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('Hey wollef sroirraw'))
def is_even(number: int) -> bool:
    """Returns True if the given number is even, using a bitwise check of the lowest bit."""
    return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
os.environ["TOKENIZERS_PARALLELISM"] = 'true'  # assumed env var: only the value 'true' survives in the source
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    "Returns everything needed to perform basic training"
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased')
    dataset = load_dataset('glue', 'mrpc', split='validation')

    def tokenize_function(examples):
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'],
        )
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding='longest', return_tensors='pt')
        return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt')

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        'hf-internal-testing/mrpc-bert-base-cased', return_dict=True)
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def test_torch_metrics(accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), F'''Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}'''
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load('glue', 'mrpc')
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup['no']
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch['labels'])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup['ddp']
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch['labels']
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print('**Testing gather_for_metrics**')
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''')
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('**Test torch metrics**')
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''')
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('**Test last batch is not dropped when perfectly divisible**')
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
logger = logging.get_logger(__name__)


class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            '''The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use DeiTImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_vivit'] = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vivit'] = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
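# Note: _LazyModule defers the heavy torch/vision imports above until one of the
# exported attributes (e.g. VivitModel) is first accessed, keeping the initial
# `import transformers` cheap.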
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/timesformer': 'https://huggingface.co/facebook/timesformer/resolve/main/config.json',
}
class TimesformerConfig(PretrainedConfig):
    model_type = 'timesformer'

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHA1Hash:
    """Class to contain the entire pipeline for the SHA-1 hashing algorithm."""

    def __init__(self, data):
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        """Left-rotates the 32-bit integer n by b bits."""
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        """Pads the data to a multiple of 64 bytes, appending the message length."""
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        """Returns a list of 64-byte blocks of the padded data."""
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        """Expands a 64-byte block into 80 32-bit words."""
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash():
    msg = b'''Test String'''
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    parser = argparse.ArgumentParser(description='''Process some strings or files''')
    parser.add_argument(
        '''--string''', dest='''input_string''', default='''Hello World!! Welcome to Cryptography''', help='''Hash the string''', )
    parser.add_argument('''--file''', dest='''input_file''', help='''Hash contents of a file''')
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, '''rb''') as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, '''utf-8''')
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest
    doctest.testmod()
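# Example: hashing a byte string directly; the digest matches hashlib's SHA-1:
#
#   SHA1Hash(b"abc").final_hash()
#   # -> 'a9993e364706816aba3e25717850c26c9cd0d89d'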
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Finds a root of `func`, starting from the point `a`, via the Newton-Raphson method."""
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
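# Note: `func` is a string expression in the variable `x`; sympy's diff() parses it
# symbolically, and eval() evaluates both the function and its derivative against
# the local `x` on every iteration (hence the wildcard `from math import *` above).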
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
print(F'The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}')
# Find Square Root of 5
print(F'The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}')
# Exponential Roots
print(F'The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}')
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
'configuration_ernie': ['ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ErnieConfig', 'ErnieOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_ernie'] = [
'ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ErnieForCausalLM',
'ErnieForMaskedLM',
'ErnieForMultipleChoice',
'ErnieForNextSentencePrediction',
'ErnieForPreTraining',
'ErnieForQuestionAnswering',
'ErnieForSequenceClassification',
'ErnieForTokenClassification',
'ErnieModel',
'ErniePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import math
import qiskit
def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1) -> qiskit.result.counts.Counts:
    """Builds and simulates a quantum full adder; inputs may be 0, 1 or 2
    (2 places the corresponding qubit in superposition via a Hadamard gate)."""
    if isinstance(input_1, str) or isinstance(input_2, str) or isinstance(carry_in, str):
        raise TypeError('''inputs must be integers.''')

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError('''inputs must be positive.''')

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError('''inputs must be exact integers.''')

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError('''inputs must be less or equal to 2.''')

    # build registers
    qr = qiskit.QuantumRegister(4, '''qr''')
    cr = qiskit.ClassicalRegister(2, '''cr''')
    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits

    backend = qiskit.Aer.get_backend('''aer_simulator''')
    job = qiskit.execute(quantum_circuit, backend, shots=1_000)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(F'Total sum count for state is: {quantum_full_adder(1, 1, 1)}')
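# With inputs (1, 1, 1) the adder computes 1 + 1 + 1 = 0b11, so the measured
# (carry_out, sum) counts concentrate entirely on the bitstring "11".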
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint

    new_checkpoint = {}
    new_checkpoint['''encoder.conv_in.weight'''] = vae_state_dict['''encoder.conv_in.weight''']
    new_checkpoint['''encoder.conv_in.bias'''] = vae_state_dict['''encoder.conv_in.bias''']
    new_checkpoint['''encoder.conv_out.weight'''] = vae_state_dict['''encoder.conv_out.weight''']
    new_checkpoint['''encoder.conv_out.bias'''] = vae_state_dict['''encoder.conv_out.bias''']
    new_checkpoint['''encoder.conv_norm_out.weight'''] = vae_state_dict['''encoder.norm_out.weight''']
    new_checkpoint['''encoder.conv_norm_out.bias'''] = vae_state_dict['''encoder.norm_out.bias''']
    new_checkpoint['''decoder.conv_in.weight'''] = vae_state_dict['''decoder.conv_in.weight''']
    new_checkpoint['''decoder.conv_in.bias'''] = vae_state_dict['''decoder.conv_in.bias''']
    new_checkpoint['''decoder.conv_out.weight'''] = vae_state_dict['''decoder.conv_out.weight''']
    new_checkpoint['''decoder.conv_out.bias'''] = vae_state_dict['''decoder.conv_out.bias''']
    new_checkpoint['''decoder.conv_norm_out.weight'''] = vae_state_dict['''decoder.norm_out.weight''']
    new_checkpoint['''decoder.conv_norm_out.bias'''] = vae_state_dict['''decoder.norm_out.bias''']
    new_checkpoint['''quant_conv.weight'''] = vae_state_dict['''quant_conv.weight''']
    new_checkpoint['''quant_conv.bias'''] = vae_state_dict['''quant_conv.bias''']
    new_checkpoint['''post_quant_conv.weight'''] = vae_state_dict['''post_quant_conv.weight''']
    new_checkpoint['''post_quant_conv.bias'''] = vae_state_dict['''post_quant_conv.bias''']
    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({'''.'''.join(layer.split('''.''')[:3]) for layer in vae_state_dict if '''encoder.down''' in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if F'''down.{layer_id}''' in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({'''.'''.join(layer.split('''.''')[:3]) for layer in vae_state_dict if '''decoder.up''' in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if F'''up.{layer_id}''' in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if F'''down.{i}''' in key and F'''down.{i}.downsample''' not in key]
        if F'''encoder.down.{i}.downsample.conv.weight''' in vae_state_dict:
            new_checkpoint[F'''encoder.down_blocks.{i}.downsamplers.0.conv.weight'''] = vae_state_dict.pop(
                F'''encoder.down.{i}.downsample.conv.weight''')
            new_checkpoint[F'''encoder.down_blocks.{i}.downsamplers.0.conv.bias'''] = vae_state_dict.pop(
                F'''encoder.down.{i}.downsample.conv.bias''')
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {'''old''': F'''down.{i}.block''', '''new''': F'''down_blocks.{i}.resnets'''}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if '''encoder.mid.block''' in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if F'''encoder.mid.block_{i}''' in key]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {'''old''': F'''mid.block_{i}''', '''new''': F'''mid_block.resnets.{i - 1}'''}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if '''encoder.mid.attn''' in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if F'''up.{block_id}''' in key and F'''up.{block_id}.upsample''' not in key
        ]
        if F'''decoder.up.{block_id}.upsample.conv.weight''' in vae_state_dict:
            new_checkpoint[F'''decoder.up_blocks.{i}.upsamplers.0.conv.weight'''] = vae_state_dict[
                F'''decoder.up.{block_id}.upsample.conv.weight'''
            ]
            new_checkpoint[F'''decoder.up_blocks.{i}.upsamplers.0.conv.bias'''] = vae_state_dict[
                F'''decoder.up.{block_id}.upsample.conv.bias'''
            ]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {'''old''': F'''up.{block_id}.block''', '''new''': F'''up_blocks.{i}.resnets'''}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if '''decoder.mid.block''' in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if F'''decoder.mid.block_{i}''' in key]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {'''old''': F'''mid.block_{i}''', '''new''': F'''mid_block.resnets.{i - 1}'''}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if '''decoder.mid.attn''' in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    # Only support V1
    r = requests.get(
        '''https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml''')
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
    if checkpoint_path.endswith('''safetensors'''):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework='''pt''', device='''cpu''') as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)['''state_dict''']

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to store the converted VAE.')
    args = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class GPTBigCodeConfig(PretrainedConfig):
    model_type = 'gpt_bigcode'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'n_embd',
        'max_position_embeddings': 'n_positions',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__(
        self,
        vocab_size=50_257,
        n_positions=1_024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
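# A minimal usage sketch (relies on the attribute_map aliasing above):
#
#   config = GPTBigCodeConfig()
#   config.hidden_size == config.n_embd         # True, via attribute_map
#   config.num_hidden_layers == config.n_layer  # True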
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), 'src')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
'torch',
'numpy',
'tokenizers',
'filelock',
'requests',
'tqdm',
'regex',
'sentencepiece',
'sacremoses',
'importlib_metadata',
'huggingface_hub',
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
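# These entry points back torch.hub; a typical call (checkpoint name illustrative):
#
#   model = torch.hub.load("huggingface/transformers", "modelForCausalLM", "gpt2")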
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
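# Example CLI invocation via fire (file paths are hypothetical):
#
#   python rouge_cli.py preds.txt targets.txt --save_path metrics.json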
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
modified_files = (
    subprocess.check_output(F'git diff --diff-filter=d --name-only {fork_point_sha}'.split()).decode('utf-8').split()
)
joined_dirs = '|'.join(sys.argv[1:])
regex = re.compile(RF'^({joined_dirs}).*?\.py$')
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self : int , __snake_case : List[Any] , __snake_case : List[Any]=13 , __snake_case : str=7 , __snake_case : Union[str, Any]=True , __snake_case : List[Any]=True , __snake_case : str=True , __snake_case : Optional[int]=True , __snake_case : Optional[int]=True , __snake_case : List[str]=False , __snake_case : List[str]=False , __snake_case : Tuple=False , __snake_case : List[str]=2 , __snake_case : Optional[int]=99 , __snake_case : Tuple=0 , __snake_case : int=32 , __snake_case : Optional[int]=5 , __snake_case : str=4 , __snake_case : str=0.1 , __snake_case : Optional[int]=0.1 , __snake_case : List[str]=512 , __snake_case : Tuple=2 , __snake_case : List[Any]=0.02 , __snake_case : Any=2 , __snake_case : Optional[int]=4 , __snake_case : Optional[Any]="last" , __snake_case : Dict=True , __snake_case : Any=None , __snake_case : str=0 , ):
'''simple docstring'''
UpperCAmelCase_ : int = parent
UpperCAmelCase_ : Optional[Any] = batch_size
UpperCAmelCase_ : Union[str, Any] = seq_length
UpperCAmelCase_ : List[Any] = is_training
UpperCAmelCase_ : List[Any] = use_input_lengths
UpperCAmelCase_ : Dict = use_token_type_ids
UpperCAmelCase_ : Union[str, Any] = use_labels
UpperCAmelCase_ : int = gelu_activation
UpperCAmelCase_ : str = sinusoidal_embeddings
UpperCAmelCase_ : List[str] = causal
UpperCAmelCase_ : Tuple = asm
UpperCAmelCase_ : List[Any] = n_langs
UpperCAmelCase_ : Union[str, Any] = vocab_size
UpperCAmelCase_ : Any = n_special
UpperCAmelCase_ : Dict = hidden_size
UpperCAmelCase_ : Dict = num_hidden_layers
UpperCAmelCase_ : str = num_attention_heads
UpperCAmelCase_ : List[str] = hidden_dropout_prob
UpperCAmelCase_ : Any = attention_probs_dropout_prob
UpperCAmelCase_ : Tuple = max_position_embeddings
UpperCAmelCase_ : Union[str, Any] = type_sequence_label_size
UpperCAmelCase_ : Union[str, Any] = initializer_range
UpperCAmelCase_ : Tuple = num_labels
UpperCAmelCase_ : List[Any] = num_choices
UpperCAmelCase_ : Any = summary_type
UpperCAmelCase_ : Optional[int] = use_proj
UpperCAmelCase_ : List[str] = scope
UpperCAmelCase_ : List[str] = bos_token_id
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : Tuple = None
if self.use_input_lengths:
UpperCAmelCase_ : List[str] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
UpperCAmelCase_ : int = None
if self.use_token_type_ids:
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : str = None
if self.use_labels:
UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ : List[Any] = ids_tensor([self.batch_size] , 2 ).float()
UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ : List[Any] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def _lowerCamelCase ( self : Optional[Any] , __snake_case : int , __snake_case : int , __snake_case : List[Any] , __snake_case : List[Any] , __snake_case : List[str] , __snake_case : List[str] , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Dict , ):
'''simple docstring'''
UpperCAmelCase_ : Any = XLMModel(config=__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Optional[int] = model(__snake_case , lengths=__snake_case , langs=__snake_case )
UpperCAmelCase_ : Any = model(__snake_case , langs=__snake_case )
UpperCAmelCase_ : Any = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self : str , __snake_case : Dict , __snake_case : Any , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : str , __snake_case : Tuple , __snake_case : Union[str, Any] , __snake_case : int , ):
'''simple docstring'''
UpperCAmelCase_ : Any = XLMWithLMHeadModel(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self : Optional[int] , __snake_case : Tuple , __snake_case : Any , __snake_case : Tuple , __snake_case : List[str] , __snake_case : Optional[int] , __snake_case : List[str] , __snake_case : Optional[Any] , __snake_case : str , __snake_case : List[str] , ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = XLMForQuestionAnsweringSimple(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Optional[int] = model(__snake_case )
UpperCAmelCase_ : Any = model(__snake_case , start_positions=__snake_case , end_positions=__snake_case )
UpperCAmelCase_ : Optional[Any] = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self : Any , __snake_case : int , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : Tuple , __snake_case : str , __snake_case : Tuple , __snake_case : int , ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = XLMForQuestionAnswering(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(__snake_case )
UpperCAmelCase_ : List[str] = model(
__snake_case , start_positions=__snake_case , end_positions=__snake_case , cls_index=__snake_case , is_impossible=__snake_case , p_mask=__snake_case , )
UpperCAmelCase_ : Optional[Any] = model(
__snake_case , start_positions=__snake_case , end_positions=__snake_case , cls_index=__snake_case , is_impossible=__snake_case , )
        (UpperCAmelCase_,) = result_with_labels.to_tuple()
UpperCAmelCase_ : Optional[int] = model(__snake_case , start_positions=__snake_case , end_positions=__snake_case )
        (UpperCAmelCase_,) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def _lowerCamelCase ( self : str , __snake_case : Dict , __snake_case : str , __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : Optional[int] , __snake_case : Dict , __snake_case : List[str] , __snake_case : Optional[Any] , __snake_case : Any , ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = XLMForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(__snake_case )
UpperCAmelCase_ : Optional[int] = model(__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowerCamelCase ( self : Any , __snake_case : Union[str, Any] , __snake_case : Any , __snake_case : List[str] , __snake_case : Optional[Any] , __snake_case : Tuple , __snake_case : str , __snake_case : Optional[int] , __snake_case : Tuple , __snake_case : int , ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = self.num_labels
UpperCAmelCase_ : Optional[int] = XLMForTokenClassification(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : List[str] = model(__snake_case , attention_mask=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self : List[Any] , __snake_case : str , __snake_case : List[str] , __snake_case : Any , __snake_case : Tuple , __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : Dict , __snake_case : Optional[int] , ):
'''simple docstring'''
UpperCAmelCase_ : int = self.num_choices
UpperCAmelCase_ : int = XLMForMultipleChoice(config=__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ : Any = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths}
        return config, inputs_dict
@require_torch
class lowerCAmelCase__( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase ):
'''simple docstring'''
A_ : Dict = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
A_ : List[str] = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
A_ : Optional[int] = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def _lowerCamelCase ( self : str , __snake_case : Optional[int] , __snake_case : Optional[Any] , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Optional[Any] ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _lowerCamelCase ( self : Union[str, Any] , __snake_case : Any , __snake_case : List[Any] , __snake_case : str=False ):
'''simple docstring'''
UpperCAmelCase_ : int = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
UpperCAmelCase_ : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case )
UpperCAmelCase_ : Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case )
return inputs_dict
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = XLMModelTester(self )
UpperCAmelCase_ : List[Any] = ConfigTester(self , config_class=__snake_case , emb_dim=37 )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*__snake_case )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*__snake_case )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*__snake_case )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*__snake_case )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*__snake_case )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*__snake_case )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*__snake_case )
def _lowerCamelCase ( self : str , __snake_case : str , __snake_case : Union[str, Any] , __snake_case : List[Any] , __snake_case : str , __snake_case : Optional[int] , __snake_case : Union[str, Any]=False , __snake_case : Optional[Any]=1 ):
'''simple docstring'''
self.assertIsInstance(__snake_case , __snake_case )
self.assertListEqual(
[isinstance(__snake_case , __snake_case ) for iter_attentions in attentions] , [True] * len(__snake_case ) )
self.assertEqual(len(__snake_case ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(__snake_case ):
# adds PAD dummy token
UpperCAmelCase_ : Dict = min_length + idx + 1
UpperCAmelCase_ : List[Any] = min_length + idx + 1
UpperCAmelCase_ : Optional[int] = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(__snake_case ) )
def _lowerCamelCase ( self : List[Any] , __snake_case : List[str] , __snake_case : int , __snake_case : Optional[int] , __snake_case : Dict , __snake_case : Any , __snake_case : Optional[Any]=False , __snake_case : str=1 ):
'''simple docstring'''
self.assertIsInstance(__snake_case , __snake_case )
self.assertListEqual(
[isinstance(__snake_case , __snake_case ) for iter_hidden_states in hidden_states] , [True] * len(__snake_case ) , )
self.assertEqual(len(__snake_case ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(__snake_case ):
# adds PAD dummy token
UpperCAmelCase_ : str = min_length + idx + 1
UpperCAmelCase_ : int = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(__snake_case ) , )
@slow
def _lowerCamelCase ( self : int ):
'''simple docstring'''
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Any = XLMModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@require_torch
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = XLMWithLMHeadModel.from_pretrained('''xlm-mlm-en-2048''' )
model.to(__snake_case )
UpperCAmelCase_ : str = torch.tensor([[14, 447]] , dtype=torch.long , device=__snake_case ) # the president
UpperCAmelCase_ : Union[str, Any] = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
UpperCAmelCase_ : Dict = model.generate(__snake_case , do_sample=__snake_case )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , __snake_case )
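# A minimal standalone sketch of the slow generation check above, outside the
# test harness. Assumes network access to the `xlm-mlm-en-2048` checkpoint;
# greedy decoding (default generation length) keeps repeating "the president".
import torch
from transformers import XLMWithLMHeadModel

model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
model.eval()
input_ids = torch.tensor([[14, 447]], dtype=torch.long)  # "the president"
output_ids = model.generate(input_ids, do_sample=False)
print(output_ids[0].tolist())  # [14, 447, 14, 447, ...]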
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
def __init__( self : Dict , *__snake_case : Any , __snake_case : Dict=None , __snake_case : List[str]=None , **__snake_case : int ):
'''simple docstring'''
super().__init__(*__snake_case , **__snake_case )
UpperCAmelCase_ : List[Any] = eval_examples
UpperCAmelCase_ : str = post_process_function
def _lowerCamelCase ( self : Any , __snake_case : Optional[Dataset] = None , __snake_case : Optional[Any]=None , __snake_case : Optional[List[str]] = None , __snake_case : str = "eval" , **__snake_case : Dict , ):
'''simple docstring'''
UpperCAmelCase_ : int = gen_kwargs.copy()
UpperCAmelCase_ : int = (
gen_kwargs['''max_length'''] if gen_kwargs.get('''max_length''' ) is not None else self.args.generation_max_length
)
UpperCAmelCase_ : Union[str, Any] = (
gen_kwargs['''num_beams'''] if gen_kwargs.get('''num_beams''' ) is not None else self.args.generation_num_beams
)
UpperCAmelCase_ : Optional[Any] = gen_kwargs
UpperCAmelCase_ : Optional[int] = self.eval_dataset if eval_dataset is None else eval_dataset
UpperCAmelCase_ : List[Any] = self.get_eval_dataloader(__snake_case )
UpperCAmelCase_ : Any = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
UpperCAmelCase_ : Tuple = self.compute_metrics
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : List[str] = time.time()
UpperCAmelCase_ : List[str] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
UpperCAmelCase_ : Optional[int] = eval_loop(
__snake_case , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__snake_case , metric_key_prefix=__snake_case , )
finally:
UpperCAmelCase_ : Optional[int] = compute_metrics
UpperCAmelCase_ : List[str] = self.args.eval_batch_size * self.args.world_size
if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
__snake_case , __snake_case , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
UpperCAmelCase_ : Optional[Any] = self.post_process_function(__snake_case , __snake_case , __snake_case )
UpperCAmelCase_ : List[Any] = self.compute_metrics(__snake_case )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'''{metric_key_prefix}_''' ):
UpperCAmelCase_ : Tuple = metrics.pop(__snake_case )
metrics.update(output.metrics )
else:
UpperCAmelCase_ : Optional[int] = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(__snake_case )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
UpperCAmelCase_ : int = self.callback_handler.on_evaluate(self.args , self.state , self.control , __snake_case )
return metrics
def _lowerCamelCase ( self : str , __snake_case : Any , __snake_case : int , __snake_case : int=None , __snake_case : str = "test" , **__snake_case : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = gen_kwargs.copy()
UpperCAmelCase_ : str = self.get_test_dataloader(__snake_case )
# Temporarily disable metric computation, we will do it in the loop here.
UpperCAmelCase_ : Any = self.compute_metrics
UpperCAmelCase_ : Tuple = None
UpperCAmelCase_ : Union[str, Any] = time.time()
UpperCAmelCase_ : int = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
UpperCAmelCase_ : Optional[Any] = eval_loop(
__snake_case , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__snake_case , metric_key_prefix=__snake_case , )
finally:
UpperCAmelCase_ : List[Any] = compute_metrics
UpperCAmelCase_ : Union[str, Any] = self.args.eval_batch_size * self.args.world_size
if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
__snake_case , __snake_case , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
UpperCAmelCase_ : Tuple = self.post_process_function(__snake_case , __snake_case , __snake_case , '''predict''' )
UpperCAmelCase_ : Optional[int] = self.compute_metrics(__snake_case )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'''{metric_key_prefix}_''' ):
UpperCAmelCase_ : List[str] = metrics.pop(__snake_case )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__snake_case )
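# Usage note (hedged sketch): the subclass above (apparently a QA variant of
# `Seq2SeqTrainer`) is constructed like the stock trainer plus the two extra
# arguments, e.g.
#   trainer = QATrainer(model=model, args=training_args, eval_examples=raw_examples,
#                       post_process_function=postprocess_fn, ...)
# and generation kwargs such as `max_length` / `num_beams` flow through
# `evaluate(...)` into the prediction loop. A runnable mini-demo of the
# `speed_metrics` helper it relies on:
import time
from transformers.trainer_utils import speed_metrics

start = time.time()
time.sleep(0.1)  # stand-in for an evaluation loop
print(speed_metrics("eval", start, num_samples=8, num_steps=4))
# -> {'eval_runtime': ..., 'eval_samples_per_second': ..., 'eval_steps_per_second': ...}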
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__UpperCamelCase : Dict = R'''
    [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
    can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.

    Args:
        title_sep (`str`, *optional*, defaults to `" / "`):
            Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
        doc_sep (`str`, *optional*, defaults to `" // "`):
            Separator inserted between the text of the retrieved document and the original input when calling
            [`RagRetriever`].
        n_docs (`int`, *optional*, defaults to 5):
            Number of documents to retrieve.
        max_combined_length (`int`, *optional*, defaults to 300):
            Max length of contextualized input returned by [`~RagRetriever.__call__`].
        retrieval_vector_size (`int`, *optional*, defaults to 768):
            Dimensionality of the document embeddings indexed by [`RagRetriever`].
        retrieval_batch_size (`int`, *optional*, defaults to 8):
            Retrieval batch size, defined as the number of queries issued concurrently to the faiss index encapsulated
            [`RagRetriever`].
        dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
            A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
            using `datasets.list_datasets()`).
        dataset_split (`str`, *optional*, defaults to `"train"`)
            Which split of the `dataset` to load.
        index_name (`str`, *optional*, defaults to `"compressed"`)
            The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
            `"compressed"`.
        index_path (`str`, *optional*)
            The path to the serialized faiss index on disk.
        passages_path (`str`, *optional*):
            A path to text passages compatible with the faiss index. Required if using
            [`~models.rag.retrieval_rag.LegacyIndex`]
        use_dummy_dataset (`bool`, *optional*, defaults to `False`)
            Whether to load a "dummy" variant of the dataset specified by `dataset`.
        label_smoothing (`float`, *optional*, defaults to 0.0):
            Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
            in the loss calculation. If set to 0, no label smoothing is performed.
        do_marginalize (`bool`, *optional*, defaults to `False`):
            If `True`, the logits are marginalized over all documents by making use of
            `torch.nn.functional.log_softmax`.
        reduce_loss (`bool`, *optional*, defaults to `False`):
            Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
        do_deduplication (`bool`, *optional*, defaults to `True`):
            Whether or not to deduplicate the generations from different context documents for a given input. Has to be
            set to `False` if used while training with distributed backend.
        exclude_bos_score (`bool`, *optional*, defaults to `False`):
            Whether or not to disregard the BOS token when computing the loss.
        output_retrieved(`bool`, *optional*, defaults to `False`):
            If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
            `context_attention_mask` are returned. See returned tensors for more detail.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        forced_eos_token_id (`int`, *optional*):
            The id of the token to force as the last generated token when `max_length` is reached. Usually set to
            `eos_token_id`.
'''
@add_start_docstrings(snake_case__ )
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Union[str, Any] = 'rag'
A_ : Tuple = True
def __init__( self : int , __snake_case : List[str]=None , __snake_case : List[Any]=True , __snake_case : Optional[int]=None , __snake_case : Union[str, Any]=None , __snake_case : int=None , __snake_case : Any=None , __snake_case : Optional[int]=None , __snake_case : Optional[int]=" / " , __snake_case : Any=" // " , __snake_case : Tuple=5 , __snake_case : Union[str, Any]=300 , __snake_case : Any=768 , __snake_case : Tuple=8 , __snake_case : int="wiki_dpr" , __snake_case : Optional[int]="train" , __snake_case : Tuple="compressed" , __snake_case : Optional[int]=None , __snake_case : List[Any]=None , __snake_case : Optional[int]=False , __snake_case : str=False , __snake_case : Dict=0.0 , __snake_case : Union[str, Any]=True , __snake_case : Union[str, Any]=False , __snake_case : str=False , __snake_case : str=False , __snake_case : Optional[Any]=True , __snake_case : int=None , **__snake_case : str , ):
'''simple docstring'''
super().__init__(
bos_token_id=__snake_case , pad_token_id=__snake_case , eos_token_id=__snake_case , decoder_start_token_id=__snake_case , forced_eos_token_id=__snake_case , is_encoder_decoder=__snake_case , prefix=__snake_case , vocab_size=__snake_case , **__snake_case , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
UpperCAmelCase_ : Dict = kwargs.pop('''question_encoder''' )
UpperCAmelCase_ : List[Any] = question_encoder_config.pop('''model_type''' )
UpperCAmelCase_ : Any = kwargs.pop('''generator''' )
UpperCAmelCase_ : Union[str, Any] = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
UpperCAmelCase_ : Any = AutoConfig.for_model(__snake_case , **__snake_case )
UpperCAmelCase_ : int = AutoConfig.for_model(__snake_case , **__snake_case )
UpperCAmelCase_ : Any = reduce_loss
UpperCAmelCase_ : List[Any] = label_smoothing
UpperCAmelCase_ : Tuple = exclude_bos_score
UpperCAmelCase_ : int = do_marginalize
UpperCAmelCase_ : Tuple = title_sep
UpperCAmelCase_ : Union[str, Any] = doc_sep
UpperCAmelCase_ : Any = n_docs
UpperCAmelCase_ : Optional[int] = max_combined_length
UpperCAmelCase_ : Any = dataset
UpperCAmelCase_ : List[Any] = dataset_split
UpperCAmelCase_ : Union[str, Any] = index_name
UpperCAmelCase_ : List[str] = retrieval_vector_size
UpperCAmelCase_ : Optional[Any] = retrieval_batch_size
UpperCAmelCase_ : Optional[int] = passages_path
UpperCAmelCase_ : Optional[Any] = index_path
UpperCAmelCase_ : List[Any] = use_dummy_dataset
UpperCAmelCase_ : int = output_retrieved
UpperCAmelCase_ : int = do_deduplication
UpperCAmelCase_ : Optional[int] = use_cache
if self.forced_eos_token_id is None:
UpperCAmelCase_ : int = getattr(self.generator , '''forced_eos_token_id''' , __snake_case )
@classmethod
def _lowerCamelCase ( cls : Optional[Any] , __snake_case : PretrainedConfig , __snake_case : PretrainedConfig , **__snake_case : str ):
'''simple docstring'''
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **__snake_case )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = copy.deepcopy(self.__dict__ )
UpperCAmelCase_ : Optional[int] = self.question_encoder.to_dict()
UpperCAmelCase_ : Dict = self.generator.to_dict()
UpperCAmelCase_ : Optional[Any] = self.__class__.model_type
return output
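# Hedged sketch: composing the RAG config from its two sub-configs, assuming
# the classmethod above keeps its public name
# `from_question_encoder_generator_configs`.
from transformers import BartConfig, DPRConfig, RagConfig

rag_config = RagConfig.from_question_encoder_generator_configs(
    DPRConfig(), BartConfig(), n_docs=5
)
print(rag_config.generator.model_type)  # "bart"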
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__UpperCamelCase : Tuple = logging.get_logger(__name__)
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : List[Any] = ['pixel_values']
def __init__( self : Dict , __snake_case : bool = True , __snake_case : Dict[str, int] = None , __snake_case : PILImageResampling = PIL.Image.BICUBIC , __snake_case : bool = True , __snake_case : Dict[str, int] = None , __snake_case : Union[int, float] = 1 / 255 , __snake_case : bool = True , __snake_case : bool = True , __snake_case : Optional[Union[float, List[float]]] = None , __snake_case : Optional[Union[float, List[float]]] = None , **__snake_case : Optional[Any] , ):
'''simple docstring'''
super().__init__(**__snake_case )
UpperCAmelCase_ : Optional[Any] = size if size is not None else {'''height''': 256, '''width''': 256}
UpperCAmelCase_ : Tuple = get_size_dict(__snake_case )
UpperCAmelCase_ : Tuple = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
UpperCAmelCase_ : List[Any] = get_size_dict(__snake_case , param_name='''crop_size''' )
UpperCAmelCase_ : int = do_resize
UpperCAmelCase_ : Any = size
UpperCAmelCase_ : Dict = resample
UpperCAmelCase_ : List[Any] = do_center_crop
UpperCAmelCase_ : Tuple = crop_size
UpperCAmelCase_ : Union[str, Any] = do_rescale
UpperCAmelCase_ : Optional[Any] = rescale_factor
UpperCAmelCase_ : str = do_normalize
UpperCAmelCase_ : Tuple = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase_ : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowerCamelCase ( self : Any , __snake_case : np.ndarray , __snake_case : Dict[str, int] , __snake_case : PILImageResampling = PIL.Image.BICUBIC , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : Any , ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = get_size_dict(__snake_case )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}''' )
return resize(
__snake_case , size=(size['''height'''], size['''width''']) , resample=__snake_case , data_format=__snake_case , **__snake_case )
def _lowerCamelCase ( self : Optional[int] , __snake_case : np.ndarray , __snake_case : Dict[str, int] , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : List[Any] , ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = get_size_dict(__snake_case )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}''' )
return center_crop(__snake_case , size=(size['''height'''], size['''width''']) , data_format=__snake_case , **__snake_case )
def _lowerCamelCase ( self : Union[str, Any] , __snake_case : np.ndarray , __snake_case : Union[int, float] , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : Dict , ):
'''simple docstring'''
return rescale(__snake_case , scale=__snake_case , data_format=__snake_case , **__snake_case )
def _lowerCamelCase ( self : Union[str, Any] , __snake_case : np.ndarray , __snake_case : Union[float, List[float]] , __snake_case : Union[float, List[float]] , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : Any , ):
'''simple docstring'''
return normalize(__snake_case , mean=__snake_case , std=__snake_case , data_format=__snake_case , **__snake_case )
def _lowerCamelCase ( self : Union[str, Any] , __snake_case : ImageInput , __snake_case : bool = None , __snake_case : Dict[str, int] = None , __snake_case : Dict=None , __snake_case : bool = None , __snake_case : Dict[str, int] = None , __snake_case : bool = None , __snake_case : float = None , __snake_case : bool = None , __snake_case : Optional[Union[float, List[float]]] = None , __snake_case : Optional[Union[float, List[float]]] = None , __snake_case : Optional[Union[str, TensorType]] = None , __snake_case : ChannelDimension = ChannelDimension.FIRST , **__snake_case : List[str] , ):
'''simple docstring'''
UpperCAmelCase_ : str = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ : List[str] = resample if resample is not None else self.resample
UpperCAmelCase_ : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ : List[str] = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ : Any = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ : Any = image_std if image_std is not None else self.image_std
UpperCAmelCase_ : Optional[Any] = size if size is not None else self.size
UpperCAmelCase_ : Dict = get_size_dict(__snake_case )
UpperCAmelCase_ : str = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ : Dict = get_size_dict(__snake_case , param_name='''crop_size''' )
UpperCAmelCase_ : Optional[Any] = make_list_of_images(__snake_case )
if not valid_images(__snake_case ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
UpperCAmelCase_ : str = [to_numpy_array(__snake_case ) for image in images]
if do_resize:
UpperCAmelCase_ : Optional[int] = [self.resize(image=__snake_case , size=__snake_case , resample=__snake_case ) for image in images]
if do_center_crop:
UpperCAmelCase_ : List[Any] = [self.center_crop(image=__snake_case , size=__snake_case ) for image in images]
if do_rescale:
UpperCAmelCase_ : int = [self.rescale(image=__snake_case , scale=__snake_case ) for image in images]
if do_normalize:
UpperCAmelCase_ : Dict = [self.normalize(image=__snake_case , mean=__snake_case , std=__snake_case ) for image in images]
UpperCAmelCase_ : Tuple = [to_channel_dimension_format(__snake_case , __snake_case ) for image in images]
UpperCAmelCase_ : Tuple = {'''pixel_values''': images}
return BatchFeature(data=__snake_case , tensor_type=__snake_case )
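# Runnable mini-demo of the same transform chain on a random channel-last
# image (hedged: the mean/std values here are arbitrary placeholders).
import numpy as np
from transformers.image_transforms import center_crop, normalize, rescale, resize

image = np.random.randint(0, 256, (300, 300, 3), dtype=np.uint8)
image = resize(image, size=(256, 256))
image = center_crop(image, size=(224, 224))
image = rescale(image, scale=1 / 255)
image = normalize(image, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
print(image.shape)  # (224, 224, 3)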
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
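# Hedged sketch of the programmatic equivalent of the CLI entry point above
# (requires TensorFlow; field names follow TensorFlowBenchmarkArguments as
# I understand them):
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

bench_args = TensorFlowBenchmarkArguments(
    models=["distilbert-base-uncased"], batch_sizes=[1], sequence_lengths=[8]
)
results = TensorFlowBenchmark(args=bench_args).run()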
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Any = logging.get_logger(__name__)
__UpperCamelCase : List[Any] = {
'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json',
'Salesforce/blip-vqa-capfit-large': (
'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-base': (
'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-large': (
'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'
),
'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json',
'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json',
'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json',
'Salesforce/blip-itm-large-flikr': (
'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'
),
}
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : List[str] = 'blip_text_model'
def __init__( self : Optional[int] , __snake_case : Any=30_524 , __snake_case : str=768 , __snake_case : Dict=768 , __snake_case : Dict=3_072 , __snake_case : List[Any]=768 , __snake_case : Optional[Any]=12 , __snake_case : int=8 , __snake_case : Tuple=512 , __snake_case : Optional[int]="gelu" , __snake_case : List[str]=1E-12 , __snake_case : Union[str, Any]=0.0 , __snake_case : Dict=0.0 , __snake_case : List[str]=0.02 , __snake_case : Optional[Any]=30_522 , __snake_case : List[str]=2 , __snake_case : str=0 , __snake_case : Union[str, Any]=102 , __snake_case : Any=True , __snake_case : Tuple=True , **__snake_case : List[str] , ):
'''simple docstring'''
super().__init__(
pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , sep_token_id=__snake_case , **__snake_case , )
UpperCAmelCase_ : Tuple = vocab_size
UpperCAmelCase_ : Any = hidden_size
UpperCAmelCase_ : Optional[Any] = encoder_hidden_size
UpperCAmelCase_ : Optional[Any] = intermediate_size
UpperCAmelCase_ : int = projection_dim
UpperCAmelCase_ : Union[str, Any] = hidden_dropout_prob
UpperCAmelCase_ : List[Any] = num_hidden_layers
UpperCAmelCase_ : Dict = num_attention_heads
UpperCAmelCase_ : Optional[int] = max_position_embeddings
UpperCAmelCase_ : Dict = layer_norm_eps
UpperCAmelCase_ : Optional[int] = hidden_act
UpperCAmelCase_ : List[Any] = initializer_range
UpperCAmelCase_ : Tuple = attention_probs_dropout_prob
UpperCAmelCase_ : Any = is_decoder
UpperCAmelCase_ : int = use_cache
@classmethod
def _lowerCamelCase ( cls : Any , __snake_case : Union[str, os.PathLike] , **__snake_case : str ):
'''simple docstring'''
cls._set_token_in_kwargs(__snake_case )
        UpperCAmelCase_ , UpperCAmelCase_ = cls.get_config_dict(__snake_case , **__snake_case )
# get the text config dict if we are loading from BlipConfig
if config_dict.get('''model_type''' ) == "blip":
UpperCAmelCase_ : Optional[int] = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__snake_case , **__snake_case )
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : List[Any] = 'blip_vision_model'
def __init__( self : Union[str, Any] , __snake_case : int=768 , __snake_case : str=3_072 , __snake_case : Dict=512 , __snake_case : Optional[Any]=12 , __snake_case : Any=12 , __snake_case : Union[str, Any]=384 , __snake_case : Tuple=16 , __snake_case : Union[str, Any]="gelu" , __snake_case : Tuple=1E-5 , __snake_case : List[Any]=0.0 , __snake_case : Dict=1E-10 , **__snake_case : Any , ):
'''simple docstring'''
super().__init__(**__snake_case )
UpperCAmelCase_ : Any = hidden_size
UpperCAmelCase_ : int = intermediate_size
UpperCAmelCase_ : List[Any] = projection_dim
UpperCAmelCase_ : str = num_hidden_layers
UpperCAmelCase_ : List[Any] = num_attention_heads
UpperCAmelCase_ : str = patch_size
UpperCAmelCase_ : str = image_size
UpperCAmelCase_ : Dict = initializer_range
UpperCAmelCase_ : Any = attention_dropout
UpperCAmelCase_ : Optional[Any] = layer_norm_eps
UpperCAmelCase_ : Union[str, Any] = hidden_act
@classmethod
def _lowerCamelCase ( cls : int , __snake_case : Union[str, os.PathLike] , **__snake_case : Tuple ):
'''simple docstring'''
cls._set_token_in_kwargs(__snake_case )
        UpperCAmelCase_ , UpperCAmelCase_ = cls.get_config_dict(__snake_case , **__snake_case )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get('''model_type''' ) == "blip":
UpperCAmelCase_ : Tuple = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__snake_case , **__snake_case )
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Tuple = 'blip'
A_ : List[Any] = True
def __init__( self : List[str] , __snake_case : Optional[Any]=None , __snake_case : Optional[Any]=None , __snake_case : Union[str, Any]=512 , __snake_case : Optional[int]=2.6_592 , __snake_case : Tuple=256 , **__snake_case : Optional[int] , ):
'''simple docstring'''
super().__init__(**__snake_case )
if text_config is None:
UpperCAmelCase_ : Optional[int] = {}
logger.info('''`text_config` is `None`. Initializing the `BlipTextConfig` with default values.''' )
if vision_config is None:
UpperCAmelCase_ : List[Any] = {}
logger.info('''`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.''' )
UpperCAmelCase_ : Tuple = BlipTextConfig(**__snake_case )
UpperCAmelCase_ : Tuple = BlipVisionConfig(**__snake_case )
UpperCAmelCase_ : Any = self.vision_config.hidden_size
UpperCAmelCase_ : str = projection_dim
UpperCAmelCase_ : str = logit_scale_init_value
UpperCAmelCase_ : Optional[int] = 1.0
UpperCAmelCase_ : Dict = 0.02
UpperCAmelCase_ : List[str] = image_text_hidden_size
@classmethod
def _lowerCamelCase ( cls : Optional[Any] , __snake_case : BlipTextConfig , __snake_case : BlipVisionConfig , **__snake_case : Union[str, Any] ):
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__snake_case )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = copy.deepcopy(self.__dict__ )
UpperCAmelCase_ : List[str] = self.text_config.to_dict()
UpperCAmelCase_ : Tuple = self.vision_config.to_dict()
UpperCAmelCase_ : List[str] = self.__class__.model_type
return output
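# Hedged sketch: building the composite BLIP config from its parts, assuming
# the classmethod above keeps its public name `from_text_vision_configs`.
from transformers import BlipConfig, BlipTextConfig, BlipVisionConfig

config = BlipConfig.from_text_vision_configs(BlipTextConfig(), BlipVisionConfig())
print(config.projection_dim)  # 512 by default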
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : str = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : List[Any] = 'unispeech-sat'
def __init__( self : int , __snake_case : Optional[int]=32 , __snake_case : Dict=768 , __snake_case : Optional[Any]=12 , __snake_case : Optional[int]=12 , __snake_case : Dict=3_072 , __snake_case : List[str]="gelu" , __snake_case : Any=0.1 , __snake_case : Tuple=0.1 , __snake_case : Optional[int]=0.1 , __snake_case : Tuple=0.0 , __snake_case : List[Any]=0.0 , __snake_case : Tuple=0.1 , __snake_case : Union[str, Any]=0.1 , __snake_case : Dict=0.02 , __snake_case : Optional[Any]=1E-5 , __snake_case : Optional[int]="group" , __snake_case : str="gelu" , __snake_case : Union[str, Any]=(512, 512, 512, 512, 512, 512, 512) , __snake_case : str=(5, 2, 2, 2, 2, 2, 2) , __snake_case : Tuple=(10, 3, 3, 3, 3, 2, 2) , __snake_case : int=False , __snake_case : Optional[int]=128 , __snake_case : Any=16 , __snake_case : Union[str, Any]=False , __snake_case : Union[str, Any]=True , __snake_case : List[Any]=0.05 , __snake_case : Dict=10 , __snake_case : int=2 , __snake_case : Optional[Any]=0.0 , __snake_case : Optional[int]=10 , __snake_case : List[Any]=0 , __snake_case : Optional[int]=320 , __snake_case : int=2 , __snake_case : Any=0.1 , __snake_case : Optional[int]=100 , __snake_case : Tuple=256 , __snake_case : List[str]=256 , __snake_case : List[Any]=0.1 , __snake_case : Tuple="mean" , __snake_case : List[Any]=False , __snake_case : List[str]=False , __snake_case : Optional[Any]=256 , __snake_case : Tuple=(512, 512, 512, 512, 1_500) , __snake_case : Optional[int]=(5, 3, 3, 1, 1) , __snake_case : Any=(1, 2, 3, 1, 1) , __snake_case : int=512 , __snake_case : Optional[int]=0 , __snake_case : Dict=1 , __snake_case : Tuple=2 , __snake_case : Union[str, Any]=504 , **__snake_case : List[str] , ):
'''simple docstring'''
super().__init__(**__snake_case , pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case )
UpperCAmelCase_ : Union[str, Any] = hidden_size
UpperCAmelCase_ : int = feat_extract_norm
UpperCAmelCase_ : Dict = feat_extract_activation
UpperCAmelCase_ : Union[str, Any] = list(__snake_case )
UpperCAmelCase_ : List[str] = list(__snake_case )
UpperCAmelCase_ : Any = list(__snake_case )
UpperCAmelCase_ : Any = conv_bias
UpperCAmelCase_ : List[str] = num_conv_pos_embeddings
UpperCAmelCase_ : Dict = num_conv_pos_embedding_groups
UpperCAmelCase_ : Optional[int] = len(self.conv_dim )
UpperCAmelCase_ : List[str] = num_hidden_layers
UpperCAmelCase_ : Dict = intermediate_size
UpperCAmelCase_ : Union[str, Any] = hidden_act
UpperCAmelCase_ : str = num_attention_heads
UpperCAmelCase_ : Union[str, Any] = hidden_dropout
UpperCAmelCase_ : List[str] = attention_dropout
UpperCAmelCase_ : Optional[Any] = activation_dropout
UpperCAmelCase_ : Dict = feat_proj_dropout
UpperCAmelCase_ : Optional[Any] = final_dropout
UpperCAmelCase_ : List[Any] = layerdrop
UpperCAmelCase_ : int = layer_norm_eps
UpperCAmelCase_ : int = initializer_range
UpperCAmelCase_ : List[Any] = vocab_size
UpperCAmelCase_ : int = num_clusters
UpperCAmelCase_ : int = do_stable_layer_norm
UpperCAmelCase_ : Any = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase_ : int = apply_spec_augment
UpperCAmelCase_ : Optional[Any] = mask_time_prob
UpperCAmelCase_ : str = mask_time_length
UpperCAmelCase_ : Any = mask_time_min_masks
UpperCAmelCase_ : str = mask_feature_prob
UpperCAmelCase_ : str = mask_feature_length
UpperCAmelCase_ : Tuple = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
UpperCAmelCase_ : Optional[int] = num_codevectors_per_group
UpperCAmelCase_ : int = num_codevector_groups
UpperCAmelCase_ : List[str] = contrastive_logits_temperature
UpperCAmelCase_ : int = feat_quantizer_dropout
UpperCAmelCase_ : List[str] = num_negatives
UpperCAmelCase_ : Any = codevector_dim
UpperCAmelCase_ : Tuple = proj_codevector_dim
UpperCAmelCase_ : Union[str, Any] = diversity_loss_weight
# ctc loss
UpperCAmelCase_ : Any = ctc_loss_reduction
UpperCAmelCase_ : Optional[Any] = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCAmelCase_ : Optional[int] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase_ : Dict = list(__snake_case )
UpperCAmelCase_ : Dict = list(__snake_case )
UpperCAmelCase_ : Dict = list(__snake_case )
UpperCAmelCase_ : Union[str, Any] = xvector_output_dim
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
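# The property above reduces the conv strides with multiplication, i.e. the
# total audio downsampling factor of the feature extractor. Standalone check:
import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)  # default from the config above
print(functools.reduce(operator.mul, conv_stride, 1))  # 320 input samples per frame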
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class lowerCAmelCase__( snake_case__ , unittest.TestCase ):
'''simple docstring'''
A_ : Optional[Any] = RoFormerTokenizer
A_ : int = RoFormerTokenizerFast
A_ : Dict = True
A_ : Optional[Any] = True
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
super().setUp()
def _lowerCamelCase ( self : int , **__snake_case : Any ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained('''junnyu/roformer_chinese_base''' , **__snake_case )
def _lowerCamelCase ( self : List[str] , **__snake_case : str ):
'''simple docstring'''
return self.rust_tokenizer_class.from_pretrained('''junnyu/roformer_chinese_base''' , **__snake_case )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = '''永和服装饰品有限公司,今天天气非常好'''
UpperCAmelCase_ : List[str] = '''永和 服装 饰品 有限公司 , 今 天 天 气 非常 好'''
return input_text, output_text
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = self.get_tokenizer()
        UpperCAmelCase_ , UpperCAmelCase_ = self.get_chinese_input_output_texts()
UpperCAmelCase_ : int = tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , output_text.split() )
UpperCAmelCase_ : Any = tokens + [tokenizer.unk_token]
UpperCAmelCase_ : Union[str, Any] = [22_943, 21_332, 34_431, 45_904, 117, 306, 1_231, 1_231, 2_653, 33_994, 1_266, 100]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = self.get_rust_tokenizer()
        UpperCAmelCase_ , UpperCAmelCase_ = self.get_chinese_input_output_texts()
UpperCAmelCase_ : List[str] = tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , output_text.split() )
UpperCAmelCase_ : str = tokens + [tokenizer.unk_token]
UpperCAmelCase_ : List[str] = [22_943, 21_332, 34_431, 45_904, 117, 306, 1_231, 1_231, 2_653, 33_994, 1_266, 100]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
pass
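# Hedged sketch of the tokenization exercised above (requires the `rjieba`
# package and network access to the checkpoint):
from transformers import RoFormerTokenizer

tokenizer = RoFormerTokenizer.from_pretrained("junnyu/roformer_chinese_base")
print(tokenizer.tokenize("永和服装饰品有限公司,今天天气非常好"))
# ['永和', '服装', '饰品', '有限公司', ',', '今', '天', '天', '气', '非常', '好']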
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")
    state_dict = chkpt["model"]
    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v
    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}
    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")
    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
if __name__ == "__main__":
__UpperCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__UpperCamelCase : Dict = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
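# Hedged usage note: after conversion, the dump folder loads like any local
# checkpoint (the path is a placeholder):
#
#   from transformers import XLMModel
#   model = XLMModel.from_pretrained("/path/to/pytorch_dump_folder")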
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    """Return the sum of all primes below n."""
    return sum(takewhile(lambda x: x < n, prime_generator()))
if __name__ == "__main__":
print(F'{solution() = }')
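# Quick sanity checks for the helpers above (assumes they are in scope):
from itertools import islice

assert is_prime(97) and not is_prime(1)
print(list(islice(prime_generator(), 5)))  # [2, 3, 5, 7, 11]
print(solution(10))  # 17 == 2 + 3 + 5 + 7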
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
__UpperCamelCase : Optional[int] = None
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : List[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__UpperCamelCase : Optional[int] = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
__UpperCamelCase : Optional[Any] = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Optional[int] = VOCAB_FILES_NAMES
A_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
A_ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ : str = ['input_ids', 'attention_mask']
A_ : int = TaTokenizer
A_ : List[int] = []
def __init__( self : Union[str, Any] , __snake_case : Tuple=None , __snake_case : List[Any]=None , __snake_case : int="</s>" , __snake_case : List[Any]="<unk>" , __snake_case : Dict="<pad>" , __snake_case : Tuple=100 , __snake_case : int=None , **__snake_case : Any , ):
'''simple docstring'''
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
UpperCAmelCase_ : Optional[int] = [f'''<extra_id_{i}>''' for i in range(__snake_case )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
UpperCAmelCase_ : Any = len(set(filter(lambda __snake_case : bool('''extra_id_''' in str(__snake_case ) ) , __snake_case ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
super().__init__(
__snake_case , tokenizer_file=__snake_case , eos_token=__snake_case , unk_token=__snake_case , pad_token=__snake_case , extra_ids=__snake_case , additional_special_tokens=__snake_case , **__snake_case , )
UpperCAmelCase_ : str = vocab_file
UpperCAmelCase_ : List[str] = False if not self.vocab_file else True
UpperCAmelCase_ : Union[str, Any] = extra_ids
@staticmethod
def _lowerCamelCase ( __snake_case : Dict , __snake_case : List[str] , __snake_case : Tuple ):
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
UpperCAmelCase_ : str = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f''' {pretrained_model_name_or_path} automatically truncating your input to'''
f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , __snake_case , )
return max_model_length
def _lowerCamelCase ( self : Dict , __snake_case : str , __snake_case : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(__snake_case ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase_ : str = os.path.join(
__snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__snake_case ):
copyfile(self.vocab_file , __snake_case )
logger.info(f'''Copy vocab file to {out_vocab_file}''' )
return (out_vocab_file,)
def _lowerCamelCase ( self : List[str] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
UpperCAmelCase_ : int = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def _lowerCamelCase ( self : Dict , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase_ : int = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return list(
set(filter(lambda __snake_case : bool(re.search(R'''<extra_id_\d+>''' , __snake_case ) ) is not None , self.additional_special_tokens ) ) )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return [self.convert_tokens_to_ids(__snake_case ) for token in self.get_sentinel_tokens()]
| 641
| 1
|
import numpy as np
def snake_case_ ( __lowercase , __lowercase ):
return np.where(vector > 0 , __lowercase , (alpha * (np.exp(__lowercase ) - 1)) )
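# Hedged usage note (parameter names are assumptions, since the signature is
# obfuscated): this is the ELU activation, returning x where x > 0 and
# alpha * (exp(x) - 1) elsewhere. For example, with alpha = 0.3:
#   snake_case_(np.array([-1.0, 0.0, 2.0]), 0.3)
#   -> array([-0.18963617,  0.        ,  2.        ])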
if __name__ == "__main__":
import doctest
doctest.testmod()
| 641
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase : str = {
'configuration_x_clip': [
'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XCLIPConfig',
'XCLIPTextConfig',
'XCLIPVisionConfig',
],
'processing_x_clip': ['XCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Any = [
'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'XCLIPModel',
'XCLIPPreTrainedModel',
'XCLIPTextModel',
'XCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
__UpperCamelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 641
| 1
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 641
|
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : str = ['image_processor', 'tokenizer']
A_ : int = 'LayoutLMv2ImageProcessor'
A_ : str = ('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast')
def __init__( self : Union[str, Any] , __snake_case : List[Any]=None , __snake_case : List[str]=None , **__snake_case : Optional[int] ):
'''simple docstring'''
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __snake_case , )
UpperCAmelCase_ : List[Any] = kwargs.pop('''feature_extractor''' )
UpperCAmelCase_ : Optional[int] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(__snake_case , __snake_case )
def __call__( self : List[str] , __snake_case : Dict , __snake_case : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __snake_case : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , __snake_case : Union[List[List[int]], List[List[List[int]]]] = None , __snake_case : Optional[Union[List[int], List[List[int]]]] = None , __snake_case : bool = True , __snake_case : Union[bool, str, PaddingStrategy] = False , __snake_case : Union[bool, str, TruncationStrategy] = None , __snake_case : Optional[int] = None , __snake_case : int = 0 , __snake_case : Optional[int] = None , __snake_case : Optional[bool] = None , __snake_case : Optional[bool] = None , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = True , __snake_case : Optional[Union[str, TensorType]] = None , **__snake_case : Optional[int] , ):
'''simple docstring'''
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes '''
'''if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
# first, apply the image processor
UpperCAmelCase_ : Tuple = self.image_processor(images=__snake_case , return_tensors=__snake_case )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(__snake_case , __snake_case ):
UpperCAmelCase_ : Optional[Any] = [text] # add batch dimension (as the image processor always adds a batch dimension)
UpperCAmelCase_ : Any = features['''words''']
UpperCAmelCase_ : str = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=__snake_case , add_special_tokens=__snake_case , padding=__snake_case , truncation=__snake_case , max_length=__snake_case , stride=__snake_case , pad_to_multiple_of=__snake_case , return_token_type_ids=__snake_case , return_attention_mask=__snake_case , return_overflowing_tokens=__snake_case , return_special_tokens_mask=__snake_case , return_offsets_mapping=__snake_case , return_length=__snake_case , verbose=__snake_case , return_tensors=__snake_case , **__snake_case , )
# add pixel values
UpperCAmelCase_ : List[str] = features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
UpperCAmelCase_ : Optional[int] = self.get_overflowing_images(__snake_case , encoded_inputs['''overflow_to_sample_mapping'''] )
UpperCAmelCase_ : List[Any] = images
return encoded_inputs
def _lowerCamelCase ( self : Union[str, Any] , __snake_case : str , __snake_case : List[Any] ):
'''simple docstring'''
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
UpperCAmelCase_ : List[str] = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(__snake_case ) != len(__snake_case ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
f''' {len(__snake_case )} and {len(__snake_case )}''' )
return images_with_overflow
def _lowerCamelCase ( self : List[Any] , *__snake_case : Optional[int] , **__snake_case : List[Any] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*__snake_case , **__snake_case )
def _lowerCamelCase ( self : str , *__snake_case : Optional[Any] , **__snake_case : Union[str, Any] ):
'''simple docstring'''
return self.tokenizer.decode(*__snake_case , **__snake_case )
@property
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __snake_case , )
return self.image_processor_class
@property
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __snake_case , )
return self.image_processor
| 641
| 1
|
from math import pi
def snake_case_ ( __lowercase , __lowercase ):
return 2 * pi * radius * (angle / 3_6_0)
if __name__ == "__main__":
print(arc_length(90, 10))
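# Expected output: 5 * pi ~= 15.707963267948966, i.e. a quarter of the
# circumference 2 * pi * 10 of a circle with radius 10.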
| 641
|
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class lowerCAmelCase__:
'''simple docstring'''
A_ : torch.Tensor # [batch_size x 3]
A_ : torch.Tensor # [batch_size x 3]
A_ : torch.Tensor # [batch_size x 3]
A_ : torch.Tensor # [batch_size x 3]
A_ : int
A_ : int
A_ : float
A_ : float
A_ : Tuple[int]
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = torch.arange(self.height * self.width )
UpperCAmelCase_ : Any = torch.stack(
[
pixel_indices % self.width,
torch.div(__snake_case , self.width , rounding_mode='''trunc''' ),
] , axis=1 , )
return coords
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ , *UpperCAmelCase_ : List[Any] = self.shape
UpperCAmelCase_ : Optional[Any] = int(np.prod(__snake_case ) )
UpperCAmelCase_ : str = self.get_image_coords()
UpperCAmelCase_ : List[str] = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
UpperCAmelCase_ : Tuple = self.get_camera_rays(__snake_case )
UpperCAmelCase_ : Union[str, Any] = rays.view(__snake_case , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def _lowerCamelCase ( self : Dict , __snake_case : torch.Tensor ):
'''simple docstring'''
UpperCAmelCase_ , *UpperCAmelCase_ , UpperCAmelCase_ : int = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
UpperCAmelCase_ : str = coords.view(__snake_case , -1 , 2 )
UpperCAmelCase_ : List[Any] = self.resolution()
UpperCAmelCase_ : Optional[Any] = self.fov()
UpperCAmelCase_ : int = (flat.float() / (res - 1)) * 2 - 1
UpperCAmelCase_ : Optional[Any] = fracs * torch.tan(fov / 2 )
UpperCAmelCase_ : Optional[Any] = fracs.view(__snake_case , -1 , 2 )
UpperCAmelCase_ : List[Any] = (
self.z.view(__snake_case , 1 , 3 )
+ self.x.view(__snake_case , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(__snake_case , 1 , 3 ) * fracs[:, :, 1:]
)
UpperCAmelCase_ : Union[str, Any] = directions / directions.norm(dim=-1 , keepdim=__snake_case )
UpperCAmelCase_ : Optional[int] = torch.stack(
[
torch.broadcast_to(self.origin.view(__snake_case , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(__snake_case , *__snake_case , 2 , 3 )
def _lowerCamelCase ( self : Any , __snake_case : int , __snake_case : int ):
'''simple docstring'''
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=__snake_case , height=__snake_case , x_fov=self.x_fov , y_fov=self.y_fov , )
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : List[Any] = []
UpperCAmelCase_ : int = []
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : Tuple = []
for theta in np.linspace(0 , 2 * np.pi , num=2_0 ):
UpperCAmelCase_ : Tuple = np.array([np.sin(__lowercase ), np.cos(__lowercase ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
UpperCAmelCase_ : str = -z * 4
UpperCAmelCase_ : List[Any] = np.array([np.cos(__lowercase ), -np.sin(__lowercase ), 0.0] )
UpperCAmelCase_ : Tuple = np.cross(__lowercase , __lowercase )
origins.append(__lowercase )
xs.append(__lowercase )
ys.append(__lowercase )
zs.append(__lowercase )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(__lowercase , axis=0 ) ).float() , x=torch.from_numpy(np.stack(__lowercase , axis=0 ) ).float() , y=torch.from_numpy(np.stack(__lowercase , axis=0 ) ).float() , z=torch.from_numpy(np.stack(__lowercase , axis=0 ) ).float() , width=__lowercase , height=__lowercase , x_fov=0.7 , y_fov=0.7 , shape=(1, len(__lowercase )) , )
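# Hedged usage sketch (the single argument is assumed to be the square image
# size): the helper above places 20 cameras evenly on a circle
# (theta in [0, 2*pi)), each tilted down towards the origin, e.g.
#   cameras = snake_case_(64)  # DifferentiableProjectiveCamera with shape=(1, 20)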
| 641
| 1
|
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
__UpperCamelCase : Optional[int] = None
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : List[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__UpperCamelCase : Optional[int] = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
__UpperCamelCase : Optional[Any] = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Optional[int] = VOCAB_FILES_NAMES
A_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
A_ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ : str = ['input_ids', 'attention_mask']
A_ : int = TaTokenizer
A_ : List[int] = []
def __init__( self : Union[str, Any] , __snake_case : Tuple=None , __snake_case : List[Any]=None , __snake_case : int="</s>" , __snake_case : List[Any]="<unk>" , __snake_case : Dict="<pad>" , __snake_case : Tuple=100 , __snake_case : int=None , **__snake_case : Any , ):
'''simple docstring'''
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
UpperCAmelCase_ : Optional[int] = [f'''<extra_id_{i}>''' for i in range(__snake_case )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
UpperCAmelCase_ : Any = len(set(filter(lambda __snake_case : bool('''extra_id_''' in str(__snake_case ) ) , __snake_case ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
super().__init__(
__snake_case , tokenizer_file=__snake_case , eos_token=__snake_case , unk_token=__snake_case , pad_token=__snake_case , extra_ids=__snake_case , additional_special_tokens=__snake_case , **__snake_case , )
UpperCAmelCase_ : str = vocab_file
UpperCAmelCase_ : List[str] = False if not self.vocab_file else True
UpperCAmelCase_ : Union[str, Any] = extra_ids
@staticmethod
def _lowerCamelCase ( __snake_case : Dict , __snake_case : List[str] , __snake_case : Tuple ):
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
UpperCAmelCase_ : str = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f''' {pretrained_model_name_or_path} automatically truncating your input to'''
f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , __snake_case , )
return max_model_length
def _lowerCamelCase ( self : Dict , __snake_case : str , __snake_case : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(__snake_case ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase_ : str = os.path.join(
__snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__snake_case ):
copyfile(self.vocab_file , __snake_case )
logger.info(f'''Copy vocab file to {out_vocab_file}''' )
return (out_vocab_file,)
def _lowerCamelCase ( self : List[str] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
UpperCAmelCase_ : int = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def _lowerCamelCase ( self : Dict , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase_ : int = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return list(
set(filter(lambda __snake_case : bool(re.search(R'''<extra_id_\d+>''' , __snake_case ) ) is not None , self.additional_special_tokens ) ) )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return [self.convert_tokens_to_ids(__snake_case ) for token in self.get_sentinel_tokens()]
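# Hedged usage note for the sentinel helpers above: with the default
# extra_ids=100, the additional special tokens are '<extra_id_0>' through
# '<extra_id_99>', and the last method converts them to their token ids.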
| 641
|
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
__UpperCamelCase : Dict = logging.getLogger(__name__)
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Optional[int] = 'token-classification'
def __init__( self : Any , __snake_case : Optional[Any] ):
'''simple docstring'''
if type(__snake_case ) == dict:
UpperCAmelCase_ : Tuple = Namespace(**__snake_case )
UpperCAmelCase_ : Dict = import_module('''tasks''' )
try:
UpperCAmelCase_ : int = getattr(__snake_case , hparams.task_type )
UpperCAmelCase_ : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f'''Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
UpperCAmelCase_ : int = self.token_classification_task.get_labels(hparams.labels )
UpperCAmelCase_ : Dict = CrossEntropyLoss().ignore_index
super().__init__(__snake_case , len(self.labels ) , self.mode )
def _lowerCamelCase ( self : Optional[int] , **__snake_case : Optional[Any] ):
'''simple docstring'''
return self.model(**__snake_case )
def _lowerCamelCase ( self : str , __snake_case : Union[str, Any] , __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : int = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
UpperCAmelCase_ : Union[str, Any] = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
) # XLM and RoBERTa don't use token_type_ids
UpperCAmelCase_ : str = self(**__snake_case )
UpperCAmelCase_ : Any = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = self.hparams
for mode in ["train", "dev", "test"]:
UpperCAmelCase_ : Optional[Any] = self._feature_file(__snake_case )
if os.path.exists(__snake_case ) and not args.overwrite_cache:
logger.info('''Loading features from cached file %s''' , __snake_case )
UpperCAmelCase_ : Any = torch.load(__snake_case )
else:
logger.info('''Creating features from dataset file at %s''' , args.data_dir )
UpperCAmelCase_ : Optional[Any] = self.token_classification_task.read_examples_from_file(args.data_dir , __snake_case )
UpperCAmelCase_ : List[str] = self.token_classification_task.convert_examples_to_features(
__snake_case , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ['''xlnet'''] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ['''xlnet'''] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=__snake_case , pad_on_left=bool(self.config.model_type in ['''xlnet'''] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info('''Saving features into cached file %s''' , __snake_case )
torch.save(__snake_case , __snake_case )
def _lowerCamelCase ( self : Any , __snake_case : int , __snake_case : int , __snake_case : bool = False ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = self._feature_file(__snake_case )
logger.info('''Loading features from cached file %s''' , __snake_case )
UpperCAmelCase_ : Optional[int] = torch.load(__snake_case )
UpperCAmelCase_ : Any = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
UpperCAmelCase_ : int = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
UpperCAmelCase_ : Any = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
UpperCAmelCase_ : Optional[int] = torch.tensor([0 for f in features] , dtype=torch.long )
# HACK: this will not be used for much longer
UpperCAmelCase_ : int = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(__snake_case , __snake_case , __snake_case , __snake_case ) , batch_size=__snake_case )
def _lowerCamelCase ( self : List[Any] , __snake_case : Dict , __snake_case : Union[str, Any] ):
'''simple docstring'''
"""Compute validation""" ""
UpperCAmelCase_ : str = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
UpperCAmelCase_ : Any = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
) # XLM and RoBERTa don't use token_type_ids
UpperCAmelCase_ : int = self(**__snake_case )
UpperCAmelCase_ , UpperCAmelCase_ : int = outputs[:2]
UpperCAmelCase_ : Optional[int] = logits.detach().cpu().numpy()
UpperCAmelCase_ : List[Any] = inputs['''labels'''].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _lowerCamelCase ( self : List[str] , __snake_case : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = torch.stack([x['''val_loss'''] for x in outputs] ).mean()
UpperCAmelCase_ : Dict = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )
UpperCAmelCase_ : Any = np.argmax(__snake_case , axis=2 )
UpperCAmelCase_ : int = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
UpperCAmelCase_ : Tuple = dict(enumerate(self.labels ) )
UpperCAmelCase_ : Union[str, Any] = [[] for _ in range(out_label_ids.shape[0] )]
UpperCAmelCase_ : Optional[int] = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
UpperCAmelCase_ : Union[str, Any] = {
'''val_loss''': val_loss_mean,
'''accuracy_score''': accuracy_score(__snake_case , __snake_case ),
'''precision''': precision_score(__snake_case , __snake_case ),
'''recall''': recall_score(__snake_case , __snake_case ),
'''f1''': fa_score(__snake_case , __snake_case ),
}
UpperCAmelCase_ : str = dict(results.items() )
UpperCAmelCase_ : List[Any] = results
return ret, preds_list, out_label_list
def _lowerCamelCase ( self : List[str] , __snake_case : int ):
'''simple docstring'''
# when stable
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = self._eval_end(__snake_case )
UpperCAmelCase_ : int = ret['''log''']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _lowerCamelCase ( self : List[Any] , __snake_case : Tuple ):
'''simple docstring'''
# updating to test_epoch_end instead of deprecated test_end
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self._eval_end(__snake_case )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
UpperCAmelCase_ : Optional[Any] = ret['''log''']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _lowerCamelCase ( __snake_case : str , __snake_case : Optional[int] ):
'''simple docstring'''
# Add NER specific options
BaseTransformer.add_model_specific_args(__snake_case , __snake_case )
parser.add_argument(
'''--task_type''' , default='''NER''' , type=__snake_case , help='''Task type to fine tune in training (e.g. NER, POS, etc)''' )
parser.add_argument(
'''--max_seq_length''' , default=128 , type=__snake_case , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--labels''' , default='''''' , type=__snake_case , help='''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.''' , )
parser.add_argument(
'''--gpus''' , default=0 , type=__snake_case , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
return parser
if __name__ == "__main__":
__UpperCamelCase : str = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
__UpperCamelCase : Dict = NERTransformer.add_model_specific_args(parser, os.getcwd())
__UpperCamelCase : Tuple = parser.parse_args()
__UpperCamelCase : Optional[Any] = NERTransformer(args)
__UpperCamelCase : int = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
__UpperCamelCase : Optional[int] = sorted(glob.glob(os.path.join(args.output_dir, 'checkpoint-epoch=*.ckpt'), recursive=True))
__UpperCamelCase : List[Any] = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
| 641
| 1
|
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient: speed of light (m/s)
__UpperCamelCase : List[str] = 2_9979_2458
# Symbols
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Union[str, Any] = symbols('ct x y z')
def snake_case_ ( __lowercase ):
if velocity > c:
raise ValueError('''Speed must not exceed light speed 299,792,458 [m/s]!''' )
elif velocity < 1:
# Usually the speed should be much higher than 1 (c order of magnitude)
raise ValueError('''Speed must be greater than or equal to 1!''' )
return velocity / c
def snake_case_ ( __lowercase ):
return 1 / sqrt(1 - beta(__lowercase ) ** 2 )
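# Hedged numeric check for the two helpers above: for velocity = 0.5 * c,
# beta = 0.5 and gamma = 1 / sqrt(1 - 0.25) ~= 1.1547.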
def snake_case_ ( __lowercase ):
return np.array(
[
[gamma(__lowercase ), -gamma(__lowercase ) * beta(__lowercase ), 0, 0],
[-gamma(__lowercase ) * beta(__lowercase ), gamma(__lowercase ), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
] )
def snake_case_ ( __lowercase , __lowercase = None ):
# If no event is given, fall back to the symbolic four-vector
if event is None:
UpperCAmelCase_ : Optional[int] = np.array([ct, x, y, z] ) # Symbolic four vector
else:
event[0] *= c # x0 is ct (speed of light * time)
return transformation_matrix(__lowercase ) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
__UpperCamelCase : List[Any] = transform(2997_9245)
print('Example of four vector: ')
print(F'ct\' = {four_vector[0]}')
print(F'x\' = {four_vector[1]}')
print(F'y\' = {four_vector[2]}')
print(F'z\' = {four_vector[3]}')
# Substitute symbols with numerical values
__UpperCamelCase : Any = {ct: c, x: 1, y: 1, z: 1}
__UpperCamelCase : List[str] = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F'\n{numerical_vector}')
| 641
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Tuple = logging.get_logger(__name__)
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Union[str, Any] = 'encoder-decoder'
A_ : Optional[int] = True
def __init__( self : Dict , **__snake_case : Union[str, Any] ):
'''simple docstring'''
super().__init__(**__snake_case )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
UpperCAmelCase_ : int = kwargs.pop('''encoder''' )
UpperCAmelCase_ : List[Any] = encoder_config.pop('''model_type''' )
UpperCAmelCase_ : int = kwargs.pop('''decoder''' )
UpperCAmelCase_ : Union[str, Any] = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
UpperCAmelCase_ : Optional[int] = AutoConfig.for_model(__snake_case , **__snake_case )
UpperCAmelCase_ : Any = AutoConfig.for_model(__snake_case , **__snake_case )
UpperCAmelCase_ : List[Any] = True
@classmethod
def _lowerCamelCase ( cls : Optional[Any] , __snake_case : PretrainedConfig , __snake_case : PretrainedConfig , **__snake_case : Union[str, Any] ):
'''simple docstring'''
logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
UpperCAmelCase_ : Dict = True
UpperCAmelCase_ : str = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__snake_case )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : int = copy.deepcopy(self.__dict__ )
UpperCAmelCase_ : Tuple = self.encoder.to_dict()
UpperCAmelCase_ : Tuple = self.decoder.to_dict()
UpperCAmelCase_ : Tuple = self.__class__.model_type
return output
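# Hedged usage note (method names are obfuscated above): the classmethod
# mirrors EncoderDecoderConfig.from_encoder_decoder_configs, setting
# is_decoder=True and add_cross_attention=True on the decoder config before
# composing the two, while the last method re-serializes both nested configs.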
| 641
| 1
|
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
__UpperCamelCase : Tuple = logging.get_logger(__name__)
__UpperCamelCase : Tuple = {name: getattr(transformers, name + 'Fast') for name in SLOW_TO_FAST_CONVERTERS}
def snake_case_ ( __lowercase , __lowercase , __lowercase , __lowercase ):
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F'''Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.''' )
if tokenizer_name is None:
UpperCAmelCase_ : Optional[Any] = TOKENIZER_CLASSES
else:
UpperCAmelCase_ : Any = {tokenizer_name: getattr(__lowercase , tokenizer_name + '''Fast''' )}
logger.info(F'''Loading tokenizer classes: {tokenizer_names}''' )
for tokenizer_name in tokenizer_names:
UpperCAmelCase_ : str = TOKENIZER_CLASSES[tokenizer_name]
UpperCAmelCase_ : str = True
if checkpoint_name is None:
UpperCAmelCase_ : int = list(tokenizer_class.max_model_input_sizes.keys() )
else:
UpperCAmelCase_ : List[str] = [checkpoint_name]
logger.info(F'''For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}''' )
for checkpoint in checkpoint_names:
logger.info(F'''Loading {tokenizer_class.__class__.__name__} {checkpoint}''' )
# Load tokenizer
UpperCAmelCase_ : List[str] = tokenizer_class.from_pretrained(__lowercase , force_download=__lowercase )
# Save fast tokenizer
logger.info(F'''Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}''' )
# For organization names we create sub-directories
if "/" in checkpoint:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = checkpoint.split('''/''' )
UpperCAmelCase_ : List[Any] = os.path.join(__lowercase , __lowercase )
elif add_prefix:
UpperCAmelCase_ : Union[str, Any] = checkpoint
UpperCAmelCase_ : Tuple = dump_path
else:
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : Union[str, Any] = dump_path
logger.info(F'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
UpperCAmelCase_ : Dict = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
UpperCAmelCase_ : Optional[Any] = file_path.split(__lowercase )[-1][0]
if next_char == "/":
UpperCAmelCase_ : Union[str, Any] = os.path.join(__lowercase , __lowercase )
UpperCAmelCase_ : Union[str, Any] = None
logger.info(F'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' )
UpperCAmelCase_ : Optional[int] = tokenizer.save_pretrained(
__lowercase , legacy_format=__lowercase , filename_prefix=__lowercase )
logger.info(F'''=> File names {file_names}''' )
for file_name in file_names:
if not file_name.endswith('''tokenizer.json''' ):
os.remove(__lowercase )
logger.info(F'''=> removing {file_name}''' )
if __name__ == "__main__":
__UpperCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--dump_path', default=None, type=str, required=True, help='Path to output generated fast tokenizer files.'
)
parser.add_argument(
'--tokenizer_name',
default=None,
type=str,
help=(
F'Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '
'download and convert all the checkpoints from AWS.'
),
)
parser.add_argument(
'--checkpoint_name',
default=None,
type=str,
help='Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.',
)
parser.add_argument(
'--force_download',
action='store_true',
help='Re-download checkpoints.',
)
__UpperCamelCase : List[str] = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 641
|
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def snake_case_ ( ):
UpperCAmelCase_ : int = '''https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'''
UpperCAmelCase_ : Dict = Image.open(requests.get(__lowercase , stream=__lowercase ).raw ).convert('''RGB''' )
return image
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : List[str] = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : Any = dct.pop(__lowercase )
UpperCAmelCase_ : Optional[Any] = val
def snake_case_ ( __lowercase , __lowercase ):
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
UpperCAmelCase_ : Optional[int] = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' )
UpperCAmelCase_ : Any = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
UpperCAmelCase_ : int = torch.cat((q_bias, torch.zeros_like(__lowercase , requires_grad=__lowercase ), v_bias) )
UpperCAmelCase_ : List[str] = qkv_bias
def snake_case_ ( __lowercase , __lowercase ):
UpperCAmelCase_ : Optional[Any] = 3_6_4 if '''coco''' in model_name else 2_2_4
UpperCAmelCase_ : Any = BlipaVisionConfig(image_size=__lowercase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
UpperCAmelCase_ : Any = OPTConfig.from_pretrained('''facebook/opt-2.7b''' , eos_token_id=__lowercase ).to_dict()
elif "opt-6.7b" in model_name:
UpperCAmelCase_ : List[str] = OPTConfig.from_pretrained('''facebook/opt-6.7b''' , eos_token_id=__lowercase ).to_dict()
elif "t5-xl" in model_name:
UpperCAmelCase_ : List[str] = TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
UpperCAmelCase_ : Any = TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
UpperCAmelCase_ : List[Any] = BlipaConfig(vision_config=__lowercase , text_config=__lowercase )
return config, image_size
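# Hedged note: COCO-finetuned checkpoints use 364x364 inputs, all other
# checkpoints 224x224, matching the conditional at the top of the helper above.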
@torch.no_grad()
def snake_case_ ( __lowercase , __lowercase=None , __lowercase=False ):
UpperCAmelCase_ : List[Any] = (
AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' )
if '''opt''' in model_name
else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' )
)
UpperCAmelCase_ : List[str] = tokenizer('''\n''' , add_special_tokens=__lowercase ).input_ids[0]
UpperCAmelCase_ , UpperCAmelCase_ : str = get_blipa_config(__lowercase , eos_token_id=__lowercase )
UpperCAmelCase_ : List[Any] = BlipaForConditionalGeneration(__lowercase ).eval()
UpperCAmelCase_ : Tuple = {
'''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''),
'''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''),
'''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''),
'''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''),
'''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''),
'''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''),
'''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''),
}
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = model_name_to_original[model_name]
# load original model
print('''Loading original model...''' )
UpperCAmelCase_ : int = '''cuda''' if torch.cuda.is_available() else '''cpu'''
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = load_model_and_preprocess(
name=__lowercase , model_type=__lowercase , is_eval=__lowercase , device=__lowercase )
original_model.eval()
print('''Done!''' )
# update state dict keys
UpperCAmelCase_ : Optional[Any] = original_model.state_dict()
UpperCAmelCase_ : List[Any] = create_rename_keys(__lowercase )
for src, dest in rename_keys:
rename_key(__lowercase , __lowercase , __lowercase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
UpperCAmelCase_ : Union[str, Any] = state_dict.pop(__lowercase )
if key.startswith('''Qformer.bert''' ):
UpperCAmelCase_ : Tuple = key.replace('''Qformer.bert''' , '''qformer''' )
if "attention.self" in key:
UpperCAmelCase_ : Optional[Any] = key.replace('''self''' , '''attention''' )
if "opt_proj" in key:
UpperCAmelCase_ : Any = key.replace('''opt_proj''' , '''language_projection''' )
if "t5_proj" in key:
UpperCAmelCase_ : Tuple = key.replace('''t5_proj''' , '''language_projection''' )
if key.startswith('''opt''' ):
UpperCAmelCase_ : Any = key.replace('''opt''' , '''language''' )
if key.startswith('''t5''' ):
UpperCAmelCase_ : Optional[Any] = key.replace('''t5''' , '''language''' )
UpperCAmelCase_ : List[str] = val
# read in qv biases
read_in_q_v_bias(__lowercase , __lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = hf_model.load_state_dict(__lowercase , strict=__lowercase )
assert len(__lowercase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
UpperCAmelCase_ : str = load_demo_image()
UpperCAmelCase_ : Any = vis_processors['''eval'''](__lowercase ).unsqueeze(0 ).to(__lowercase )
UpperCAmelCase_ : Optional[Any] = tokenizer(['''\n'''] , return_tensors='''pt''' ).input_ids.to(__lowercase )
# create processor
UpperCAmelCase_ : Optional[int] = BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size} , image_mean=__lowercase , image_std=__lowercase )
UpperCAmelCase_ : Tuple = BlipaProcessor(image_processor=__lowercase , tokenizer=__lowercase )
UpperCAmelCase_ : str = processor(images=__lowercase , return_tensors='''pt''' ).pixel_values.to(__lowercase )
# make sure processor creates exact same pixel values
assert torch.allclose(__lowercase , __lowercase )
original_model.to(__lowercase )
hf_model.to(__lowercase )
with torch.no_grad():
if "opt" in model_name:
UpperCAmelCase_ : Tuple = original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits
UpperCAmelCase_ : Optional[int] = hf_model(__lowercase , __lowercase ).logits
else:
UpperCAmelCase_ : int = original_model(
{'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits
UpperCAmelCase_ : Optional[int] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_0_0 )
UpperCAmelCase_ : int = hf_model(__lowercase , __lowercase , labels=__lowercase ).logits
assert original_logits.shape == logits.shape
print('''First values of original logits:''' , original_logits[0, :3, :3] )
print('''First values of HF logits:''' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
UpperCAmelCase_ : Tuple = torch.tensor(
[[-4_1.5_8_5_0, -4.4_4_4_0, -8.9_9_2_2], [-4_7.4_3_2_2, -5.9_1_4_3, -1.7_3_4_0]] , device=__lowercase )
assert torch.allclose(logits[0, :3, :3] , __lowercase , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
UpperCAmelCase_ : Tuple = torch.tensor(
[[-5_7.0_1_0_9, -9.8_9_6_7, -1_2.6_2_8_0], [-6_8.6_5_7_8, -1_2.7_1_9_1, -1_0.5_0_6_5]] , device=__lowercase )
else:
# cast to same type
UpperCAmelCase_ : Optional[int] = logits.dtype
assert torch.allclose(original_logits.to(__lowercase ) , __lowercase , atol=1e-2 )
print('''Looks ok!''' )
print('''Generating a caption...''' )
UpperCAmelCase_ : Union[str, Any] = ''''''
UpperCAmelCase_ : Optional[Any] = tokenizer(__lowercase , return_tensors='''pt''' ).input_ids.to(__lowercase )
UpperCAmelCase_ : int = original_model.generate({'''image''': original_pixel_values} )
UpperCAmelCase_ : Optional[int] = hf_model.generate(
__lowercase , __lowercase , do_sample=__lowercase , num_beams=5 , max_length=3_0 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('''Original generation:''' , __lowercase )
UpperCAmelCase_ : Tuple = input_ids.shape[1]
UpperCAmelCase_ : str = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=__lowercase )
UpperCAmelCase_ : Optional[int] = [text.strip() for text in output_text]
print('''HF generation:''' , __lowercase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(__lowercase )
hf_model.save_pretrained(__lowercase )
if push_to_hub:
processor.push_to_hub(F'''nielsr/{model_name}''' )
hf_model.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
__UpperCamelCase : List[Any] = argparse.ArgumentParser()
__UpperCamelCase : Optional[Any] = [
'blip2-opt-2.7b',
'blip2-opt-6.7b',
'blip2-opt-2.7b-coco',
'blip2-opt-6.7b-coco',
'blip2-flan-t5-xl',
'blip2-flan-t5-xl-coco',
'blip2-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='blip2-opt-2.7b',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
__UpperCamelCase : int = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 641
| 1
|
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : jnp.ndarray
@flax_register_to_config
class lowerCAmelCase__( nn.Module , snake_case__ , snake_case__ ):
'''simple docstring'''
A_ : int = 3_2
A_ : int = 4
A_ : int = 4
A_ : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
A_ : Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
A_ : Union[bool, Tuple[bool]] = False
A_ : Tuple[int] = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
A_ : int = 2
A_ : Union[int, Tuple[int]] = 8
A_ : Optional[Union[int, Tuple[int]]] = None
A_ : int = 1_2_8_0
A_ : float = 0.0
A_ : bool = False
A_ : jnp.dtype = jnp.floataa
A_ : bool = True
A_ : int = 0
A_ : bool = False
def _lowerCamelCase ( self : Tuple , __snake_case : jax.random.KeyArray ):
'''simple docstring'''
# init input tensors
UpperCAmelCase_ : Union[str, Any] = (1, self.in_channels, self.sample_size, self.sample_size)
UpperCAmelCase_ : Optional[int] = jnp.zeros(__snake_case , dtype=jnp.floataa )
UpperCAmelCase_ : int = jnp.ones((1,) , dtype=jnp.intaa )
UpperCAmelCase_ : Dict = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
UpperCAmelCase_ , UpperCAmelCase_ : int = jax.random.split(__snake_case )
UpperCAmelCase_ : Optional[Any] = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(__snake_case , __snake_case , __snake_case , __snake_case )["params"]
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = self.block_out_channels
UpperCAmelCase_ : Tuple = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
'''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
UpperCAmelCase_ : Optional[int] = self.num_attention_heads or self.attention_head_dim
# input
UpperCAmelCase_ : int = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
UpperCAmelCase_ : Union[str, Any] = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
UpperCAmelCase_ : Tuple = FlaxTimestepEmbedding(__snake_case , dtype=self.dtype )
UpperCAmelCase_ : Union[str, Any] = self.only_cross_attention
if isinstance(__snake_case , __snake_case ):
UpperCAmelCase_ : Optional[Any] = (only_cross_attention,) * len(self.down_block_types )
if isinstance(__snake_case , __snake_case ):
UpperCAmelCase_ : Optional[Any] = (num_attention_heads,) * len(self.down_block_types )
# down
UpperCAmelCase_ : Optional[int] = []
UpperCAmelCase_ : List[str] = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
UpperCAmelCase_ : Optional[int] = output_channel
UpperCAmelCase_ : Union[str, Any] = block_out_channels[i]
UpperCAmelCase_ : List[str] = i == len(__snake_case ) - 1
if down_block_type == "CrossAttnDownBlock2D":
UpperCAmelCase_ : List[str] = FlaxCrossAttnDownBlockaD(
in_channels=__snake_case , out_channels=__snake_case , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
UpperCAmelCase_ : List[Any] = FlaxDownBlockaD(
in_channels=__snake_case , out_channels=__snake_case , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(__snake_case )
UpperCAmelCase_ : Dict = down_blocks
# mid
UpperCAmelCase_ : Any = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
UpperCAmelCase_ : int = []
UpperCAmelCase_ : Dict = list(reversed(__snake_case ) )
UpperCAmelCase_ : Optional[Any] = list(reversed(__snake_case ) )
UpperCAmelCase_ : Any = list(reversed(__snake_case ) )
UpperCAmelCase_ : Dict = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
UpperCAmelCase_ : List[str] = output_channel
UpperCAmelCase_ : List[Any] = reversed_block_out_channels[i]
UpperCAmelCase_ : Dict = reversed_block_out_channels[min(i + 1 , len(__snake_case ) - 1 )]
UpperCAmelCase_ : int = i == len(__snake_case ) - 1
if up_block_type == "CrossAttnUpBlock2D":
UpperCAmelCase_ : List[str] = FlaxCrossAttnUpBlockaD(
in_channels=__snake_case , out_channels=__snake_case , prev_output_channel=__snake_case , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
UpperCAmelCase_ : str = FlaxUpBlockaD(
in_channels=__snake_case , out_channels=__snake_case , prev_output_channel=__snake_case , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(__snake_case )
UpperCAmelCase_ : Any = output_channel
UpperCAmelCase_ : str = up_blocks
# out
UpperCAmelCase_ : Dict = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
UpperCAmelCase_ : Union[str, Any] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : Optional[Any] , __snake_case : Union[str, Any] , __snake_case : Tuple , __snake_case : List[Any] , __snake_case : str=None , __snake_case : List[Any]=None , __snake_case : bool = True , __snake_case : bool = False , ):
'''simple docstring'''
# 1. time
if not isinstance(__snake_case , jnp.ndarray ):
UpperCAmelCase_ : List[Any] = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(__snake_case , jnp.ndarray ) and len(timesteps.shape ) == 0:
UpperCAmelCase_ : Tuple = timesteps.astype(dtype=jnp.floataa )
UpperCAmelCase_ : List[str] = jnp.expand_dims(__snake_case , 0 )
UpperCAmelCase_ : int = self.time_proj(__snake_case )
UpperCAmelCase_ : Optional[Any] = self.time_embedding(__snake_case )
# 2. pre-process
UpperCAmelCase_ : Optional[int] = jnp.transpose(__snake_case , (0, 2, 3, 1) )
UpperCAmelCase_ : List[str] = self.conv_in(__snake_case )
# 3. down
UpperCAmelCase_ : Tuple = (sample,)
for down_block in self.down_blocks:
if isinstance(__snake_case , __snake_case ):
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = down_block(__snake_case , __snake_case , __snake_case , deterministic=not train )
else:
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = down_block(__snake_case , __snake_case , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
UpperCAmelCase_ : Dict = ()
for down_block_res_sample, down_block_additional_residual in zip(
__snake_case , __snake_case ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
UpperCAmelCase_ : Tuple = new_down_block_res_samples
# 4. mid
UpperCAmelCase_ : Tuple = self.mid_block(__snake_case , __snake_case , __snake_case , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
UpperCAmelCase_ : List[str] = down_block_res_samples[-(self.layers_per_block + 1) :]
UpperCAmelCase_ : Optional[Any] = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(__snake_case , __snake_case ):
UpperCAmelCase_ : Optional[int] = up_block(
__snake_case , temb=__snake_case , encoder_hidden_states=__snake_case , res_hidden_states_tuple=__snake_case , deterministic=not train , )
else:
UpperCAmelCase_ : List[Any] = up_block(__snake_case , temb=__snake_case , res_hidden_states_tuple=__snake_case , deterministic=not train )
# 6. post-process
UpperCAmelCase_ : int = self.conv_norm_out(__snake_case )
UpperCAmelCase_ : List[str] = nn.silu(__snake_case )
UpperCAmelCase_ : Optional[Any] = self.conv_out(__snake_case )
UpperCAmelCase_ : Any = jnp.transpose(__snake_case , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=__snake_case )
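# A minimal shape sketch for the forward pass above, assuming the module defined
# earlier in this file behaves like diffusers' FlaxUNet2DConditionModel; all
# shapes below are illustrative placeholders, not values taken from this file:
#
#   import jax.numpy as jnp
#   sample = jnp.zeros((1, 4, 64, 64))               # NCHW layout on entry
#   timesteps = jnp.array([10], dtype=jnp.int32)     # scalar steps are broadcast
#   encoder_hidden_states = jnp.zeros((1, 77, 768))  # e.g. CLIP text states
#
# Internally the sample is transposed to NHWC for the Flax convolutions and
# transposed back to NCHW just before the (sample,) tuple or output dataclass
# is returned.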
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def snake_case_ ( __lowercase , __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : int = multiprocessing.Manager()
UpperCAmelCase_ : Union[str, Any] = manager.list()
UpperCAmelCase_ : int = multiprocessing.Process(target=__lowercase , args=(check_program, result, timeout) )
p.start()
p.join(timeout=timeout + 1 )
if p.is_alive():
p.kill()
if not result:
result.append('''timed out''' )
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
with create_tempdir():
# These system calls are needed when cleaning up tempdir.
import os
import shutil
UpperCAmelCase_ : str = shutil.rmtree
UpperCAmelCase_ : Tuple = os.rmdir
UpperCAmelCase_ : Dict = os.chdir
# Disable functionalities that can make destructive changes to the test.
reliability_guard()
# Run program.
try:
UpperCAmelCase_ : Optional[int] = {}
with swallow_io():
with time_limit(__lowercase ):
exec(__lowercase , __lowercase )
result.append('''passed''' )
except TimeoutException:
result.append('''timed out''' )
except BaseException as e:
result.append(F'''failed: {e}''' )
# Needed for cleaning up.
UpperCAmelCase_ : Optional[int] = rmtree
UpperCAmelCase_ : Optional[Any] = rmdir
UpperCAmelCase_ : Optional[Any] = chdir
@contextlib.contextmanager
def snake_case_ ( __lowercase ):
def signal_handler(__lowercase , __lowercase ):
raise TimeoutException('''Timed out!''' )
signal.setitimer(signal.ITIMER_REAL , __lowercase )
signal.signal(signal.SIGALRM , __lowercase )
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
@contextlib.contextmanager
def snake_case_ ( ):
UpperCAmelCase_ : Optional[Any] = WriteOnlyStringIO()
with contextlib.redirect_stdout(__lowercase ):
with contextlib.redirect_stderr(__lowercase ):
with redirect_stdin(__lowercase ):
yield
@contextlib.contextmanager
def snake_case_ ( ):
with tempfile.TemporaryDirectory() as dirname:
with chdir(__lowercase ):
yield dirname
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
pass
class lowerCAmelCase__( io.StringIO ):
'''simple docstring'''
def _lowerCamelCase ( self : Dict , *__snake_case : List[Any] , **__snake_case : int ):
'''simple docstring'''
raise OSError
def _lowerCamelCase ( self : Dict , *__snake_case : int , **__snake_case : Any ):
'''simple docstring'''
raise OSError
def _lowerCamelCase ( self : int , *__snake_case : List[str] , **__snake_case : Optional[Any] ):
'''simple docstring'''
raise OSError
def _lowerCamelCase ( self : Union[str, Any] , *__snake_case : Optional[Any] , **__snake_case : List[Any] ):
'''simple docstring'''
return False
class lowerCAmelCase__( contextlib._RedirectStream ): # type: ignore
'''simple docstring'''
A_ : Optional[Any] = 'stdin'
@contextlib.contextmanager
def snake_case_ ( __lowercase ):
if root == ".":
yield
return
UpperCAmelCase_ : Tuple = os.getcwd()
os.chdir(__lowercase )
try:
yield
except BaseException as exc:
raise exc
finally:
os.chdir(__lowercase )
def snake_case_ ( __lowercase=None ):
if maximum_memory_bytes is not None:
import resource
resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
faulthandler.disable()
import builtins
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Any = None
import os
UpperCAmelCase_ : Union[str, Any] = '''1'''
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : str = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : int = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : int = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Any = None
import shutil
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Tuple = None
import subprocess
UpperCAmelCase_ : Dict = None # type: ignore
UpperCAmelCase_ : Union[str, Any] = None
import sys
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : Dict = None
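# A hedged usage sketch for the functions above, assuming the upstream
# human-eval names (in this file they are all defined as `snake_case_`): the
# first function plays the role of `check_correctness` and spawns a watchdog
# process, and the second is the sandboxed `unsafe_execute` body.
#
#   program = "assert 1 + 1 == 2"
#   record = check_correctness(program, timeout=3.0, task_id="demo/0", completion_id=0)
#   record["passed"]  # True iff the program ran to completion within the timeout
#
# Note that the reliability guard above nulls out os/shutil/subprocess
# attributes, so each worker process is single-use by design.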
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase : Union[str, Any] = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : str = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
__UpperCamelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
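# The _LazyModule pattern above defers heavy imports (torch model classes and
# so on) until an attribute is first accessed. A minimal standalone sketch of
# the same idea, independent of the transformers implementation:
import importlib
import types


class _MinimalLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_submodule:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._attr_to_submodule[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache, so the real import happens only once
        return value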
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : str = logging.get_logger(__name__)
__UpperCamelCase : Any = {
'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : int = 'falcon'
A_ : int = ['past_key_values']
def __init__( self : Optional[Any] , __snake_case : Tuple=65_024 , __snake_case : List[str]=4_544 , __snake_case : Optional[Any]=32 , __snake_case : Any=71 , __snake_case : str=1E-5 , __snake_case : List[str]=0.02 , __snake_case : List[Any]=True , __snake_case : Dict=0.0 , __snake_case : Optional[Any]=0.0 , __snake_case : Any=None , __snake_case : List[Any]=False , __snake_case : Dict=False , __snake_case : Optional[int]=True , __snake_case : List[Any]=True , __snake_case : Optional[Any]=False , __snake_case : Dict=11 , __snake_case : List[str]=11 , **__snake_case : int , ):
'''simple docstring'''
UpperCAmelCase_ : int = vocab_size
# Backward compatibility with n_embed kwarg
UpperCAmelCase_ : Union[str, Any] = kwargs.pop('''n_embed''' , __snake_case )
UpperCAmelCase_ : str = hidden_size if n_embed is None else n_embed
UpperCAmelCase_ : Tuple = num_hidden_layers
UpperCAmelCase_ : Tuple = num_attention_heads
UpperCAmelCase_ : Optional[int] = layer_norm_epsilon
UpperCAmelCase_ : int = initializer_range
UpperCAmelCase_ : Optional[int] = use_cache
UpperCAmelCase_ : List[Any] = hidden_dropout
UpperCAmelCase_ : Any = attention_dropout
UpperCAmelCase_ : Tuple = bos_token_id
UpperCAmelCase_ : List[Any] = eos_token_id
UpperCAmelCase_ : Any = num_attention_heads if num_kv_heads is None else num_kv_heads
UpperCAmelCase_ : Optional[int] = alibi
UpperCAmelCase_ : Dict = new_decoder_architecture
UpperCAmelCase_ : List[Any] = multi_query # Ignored when new_decoder_architecture is True
UpperCAmelCase_ : Tuple = parallel_attn
UpperCAmelCase_ : List[Any] = bias
super().__init__(bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return self.hidden_size // self.num_attention_heads
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return not self.alibi
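# A hedged usage sketch; upstream this class is transformers.FalconConfig, and
# the two properties above derive the head size and positional-encoding choice:
#
#   cfg = FalconConfig()                         # defaults from __init__ above
#   cfg.hidden_size // cfg.num_attention_heads   # 4544 // 71 == 64 (head dim)
#   not cfg.alibi                                # second property: use rotary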
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase : str = {
'configuration_x_clip': [
'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XCLIPConfig',
'XCLIPTextConfig',
'XCLIPVisionConfig',
],
'processing_x_clip': ['XCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Any = [
'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'XCLIPModel',
'XCLIPPreTrainedModel',
'XCLIPTextModel',
'XCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
__UpperCamelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
def snake_case_ ( __lowercase ):
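    '''
    Reverse every word longer than four characters, leaving shorter words unchanged.

    >>> snake_case_('Hey wollef sroirraw')
    'Hey fellow warriors'
    '''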
return " ".join(
''''''.join(word[::-1] ) if len(__lowercase ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
    print(snake_case_('Hey wollef sroirraw'))
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def snake_case_ ( __lowercase ):
    UpperCAmelCase_ : Any = filter(lambda p : p.requires_grad , model.parameters() )
UpperCAmelCase_ : Optional[Any] = sum([np.prod(p.size() ) for p in model_parameters] )
return params
__UpperCamelCase : Dict = logging.getLogger(__name__)
def snake_case_ ( __lowercase , __lowercase ):
if metric == "rouge2":
UpperCAmelCase_ : List[Any] = '''{val_avg_rouge2:.4f}-{step_count}'''
elif metric == "bleu":
UpperCAmelCase_ : str = '''{val_avg_bleu:.4f}-{step_count}'''
elif metric == "em":
UpperCAmelCase_ : Optional[Any] = '''{val_avg_em:.4f}-{step_count}'''
elif metric == "loss":
UpperCAmelCase_ : Union[str, Any] = '''{val_avg_loss:.4f}-{step_count}'''
else:
raise NotImplementedError(
F'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'''
''' function.''' )
UpperCAmelCase_ : Optional[int] = ModelCheckpoint(
dirpath=__lowercase , filename=__lowercase , monitor=F'''val_{metric}''' , mode='''max''' , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def snake_case_ ( __lowercase , __lowercase ):
return EarlyStopping(
monitor=F'''val_{metric}''' , mode='''min''' if '''loss''' in metric else '''max''' , patience=__lowercase , verbose=__lowercase , )
class lowerCAmelCase__( pl.Callback ):
'''simple docstring'''
def _lowerCamelCase ( self : Dict , __snake_case : Optional[int] , __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : int = {f'''lr_group_{i}''': param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(__snake_case )
@rank_zero_only
def _lowerCamelCase ( self : Tuple , __snake_case : pl.Trainer , __snake_case : pl.LightningModule , __snake_case : str , __snake_case : Optional[Any]=True ):
'''simple docstring'''
logger.info(f'''***** {type_path} results at step {trainer.global_step:05d} *****''' )
UpperCAmelCase_ : int = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']} )
# Log results
UpperCAmelCase_ : int = Path(pl_module.hparams.output_dir )
if type_path == "test":
UpperCAmelCase_ : Optional[Any] = od / '''test_results.txt'''
UpperCAmelCase_ : Any = od / '''test_generations.txt'''
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
UpperCAmelCase_ : Tuple = od / f'''{type_path}_results/{trainer.global_step:05d}.txt'''
UpperCAmelCase_ : Tuple = od / f'''{type_path}_generations/{trainer.global_step:05d}.txt'''
results_file.parent.mkdir(exist_ok=__snake_case )
generations_file.parent.mkdir(exist_ok=__snake_case )
with open(__snake_case , '''a+''' ) as writer:
for key in sorted(__snake_case ):
if key in ["log", "progress_bar", "preds"]:
continue
UpperCAmelCase_ : str = metrics[key]
if isinstance(__snake_case , torch.Tensor ):
UpperCAmelCase_ : Dict = val.item()
UpperCAmelCase_ : Optional[int] = f'''{key}: {val:.6f}\n'''
writer.write(__snake_case )
if not save_generations:
return
if "preds" in metrics:
UpperCAmelCase_ : Union[str, Any] = '''\n'''.join(metrics['''preds'''] )
generations_file.open('''w+''' ).write(__snake_case )
@rank_zero_only
def _lowerCamelCase ( self : Any , __snake_case : Tuple , __snake_case : List[str] ):
'''simple docstring'''
try:
UpperCAmelCase_ : List[str] = pl_module.model.model.num_parameters()
except AttributeError:
UpperCAmelCase_ : Union[str, Any] = pl_module.model.num_parameters()
UpperCAmelCase_ : Tuple = count_trainable_parameters(__snake_case )
# mp stands for million parameters
trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1E6, '''grad_mp''': n_trainable_pars / 1E6} )
@rank_zero_only
def _lowerCamelCase ( self : List[str] , __snake_case : pl.Trainer , __snake_case : pl.LightningModule ):
'''simple docstring'''
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(__snake_case , __snake_case , '''test''' )
@rank_zero_only
def _lowerCamelCase ( self : Any , __snake_case : pl.Trainer , __snake_case : Optional[int] ):
'''simple docstring'''
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
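# A hedged wiring sketch, assuming a finetuning script that defines `output_dir`
# and a LightningModule `model`; the two helpers above are the checkpoint and
# early-stopping factories, and the class is the logging callback:
#
#   checkpoint_cb = get_checkpoint_callback(output_dir, metric="rouge2")
#   early_stop_cb = get_early_stopping_callback(metric="rouge2", patience=3)
#   trainer = pl.Trainer(callbacks=[Seq2SeqLoggingCallback(), checkpoint_cb, early_stop_cb])
#   trainer.fit(model)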
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__UpperCamelCase : str = 'true'
def snake_case_ ( __lowercase , __lowercase=8_2 , __lowercase=1_6 ):
set_seed(4_2 )
UpperCAmelCase_ : Optional[int] = RegressionModel()
UpperCAmelCase_ : Optional[int] = deepcopy(__lowercase )
UpperCAmelCase_ : Union[str, Any] = RegressionDataset(length=__lowercase )
UpperCAmelCase_ : Any = DataLoader(__lowercase , batch_size=__lowercase )
model.to(accelerator.device )
UpperCAmelCase_ , UpperCAmelCase_ : Dict = accelerator.prepare(__lowercase , __lowercase )
return model, ddp_model, dataloader
def snake_case_ ( __lowercase , __lowercase=False ):
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''' )
UpperCAmelCase_ : List[Any] = load_dataset('''glue''' , '''mrpc''' , split='''validation''' )
def tokenize_function(__lowercase ):
UpperCAmelCase_ : int = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__lowercase , max_length=__lowercase )
return outputs
with accelerator.main_process_first():
UpperCAmelCase_ : List[str] = dataset.map(
__lowercase , batched=__lowercase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
UpperCAmelCase_ : Any = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__lowercase ):
if use_longest:
return tokenizer.pad(__lowercase , padding='''longest''' , return_tensors='''pt''' )
return tokenizer.pad(__lowercase , padding='''max_length''' , max_length=1_2_8 , return_tensors='''pt''' )
return DataLoader(__lowercase , shuffle=__lowercase , collate_fn=__lowercase , batch_size=1_6 )
def snake_case_ ( __lowercase , __lowercase ):
UpperCAmelCase_ : Optional[int] = Accelerator(dispatch_batches=__lowercase , split_batches=__lowercase )
UpperCAmelCase_ : int = get_dataloader(__lowercase , not dispatch_batches )
UpperCAmelCase_ : Optional[int] = AutoModelForSequenceClassification.from_pretrained(
'''hf-internal-testing/mrpc-bert-base-cased''' , return_dict=__lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(__lowercase , __lowercase )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : Dict = []
for batch in dataloader:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = batch.values()
with torch.no_grad():
UpperCAmelCase_ : List[Any] = model(__lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Dict = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
UpperCAmelCase_ , UpperCAmelCase_ : Any = [], []
for logit, targ in logits_and_targets:
logits.append(__lowercase )
targs.append(__lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = torch.cat(__lowercase ), torch.cat(__lowercase )
return logits, targs
def snake_case_ ( __lowercase , __lowercase=8_2 , __lowercase=False , __lowercase=False , __lowercase=1_6 ):
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = get_basic_setup(__lowercase , __lowercase , __lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = generate_predictions(__lowercase , __lowercase , __lowercase )
assert (
len(__lowercase ) == num_samples
), F'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(__lowercase )}'''
def snake_case_ ( __lowercase = False , __lowercase = False ):
UpperCAmelCase_ : Optional[Any] = evaluate.load('''glue''' , '''mrpc''' )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = get_mrpc_setup(__lowercase , __lowercase )
# First do baseline
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = setup['''no''']
model.to(__lowercase )
model.eval()
for batch in dataloader:
batch.to(__lowercase )
with torch.inference_mode():
UpperCAmelCase_ : str = model(**__lowercase )
UpperCAmelCase_ : Dict = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=__lowercase , references=batch['''labels'''] )
UpperCAmelCase_ : Optional[int] = metric.compute()
# Then do distributed
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = setup['''ddp''']
model.eval()
for batch in dataloader:
with torch.inference_mode():
UpperCAmelCase_ : Optional[int] = model(**__lowercase )
UpperCAmelCase_ : int = outputs.logits.argmax(dim=-1 )
UpperCAmelCase_ : Optional[int] = batch['''labels''']
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=__lowercase , references=__lowercase )
UpperCAmelCase_ : Dict = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def snake_case_ ( ):
UpperCAmelCase_ : str = Accelerator(split_batches=__lowercase , dispatch_batches=__lowercase )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be run on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('''**Testing gather_for_metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' )
test_mrpc(__lowercase , __lowercase )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test torch metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
UpperCAmelCase_ : Optional[Any] = Accelerator(split_batches=__lowercase , dispatch_batches=__lowercase )
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' )
test_torch_metrics(__lowercase , 9_9 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test last batch is not dropped when perfectly divisible**''' )
UpperCAmelCase_ : List[Any] = Accelerator()
test_torch_metrics(__lowercase , 5_1_2 )
accelerator.state._reset_state()
def snake_case_ ( __lowercase ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
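# For intuition on the `gather_for_metrics` behavior the tests above exercise
# (names assume the upstream test script): with 99 samples sharded over 2
# processes, each shard is padded to 50 samples, so a plain all-gather would
# return 100 predictions; gather_for_metrics drops the duplicated pad sample so
# `len(logits) == 99`, which is what the length assertion above checks.
#
#   per_process = [50, 50]      # padded shards
#   sum(per_process) - 99 == 1  # duplicates dropped by gather_for_metrics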
def snake_case_ ( __lowercase = 1_0_0_0_0_0_0 ):
UpperCAmelCase_ : List[str] = limit + 1
UpperCAmelCase_ : Dict = [0] * limit
for first_term in range(1 , __lowercase ):
for n in range(__lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : List[Any] = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d; also 4d < a
UpperCAmelCase_ : str = sum(1 for x in frequency[1:limit] if x == 1_0 )
return count
if __name__ == "__main__":
print(F'{solution() = }')
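# Why the loop above works, assuming the standard Project Euler 135 reading:
# write the arithmetic progression as x = a + d, y = a, z = a - d with
# a = first_term. Then x**2 - y**2 - z**2 = a * (4*d - a) = n, so
# d = (n / a + a) / 4, which is exactly `common_difference` before the
# divisibility-by-4 check. A quick sanity check of the identity:
#
#   a, d = 10, 6  # x, y, z = 16, 10, 4
#   assert (a + d) ** 2 - a ** 2 - (a - d) ** 2 == a * (4 * d - a) == 140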
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCamelCase : List[Any] = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[Any] = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[Any] = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
__UpperCamelCase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCamelCase : Union[str, Any] = {
'configuration_funnel': ['FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FunnelConfig'],
'convert_funnel_original_tf_checkpoint_to_pytorch': [],
'tokenization_funnel': ['FunnelTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Dict = ['FunnelTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Union[str, Any] = [
'FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'FunnelBaseModel',
'FunnelForMaskedLM',
'FunnelForMultipleChoice',
'FunnelForPreTraining',
'FunnelForQuestionAnswering',
'FunnelForSequenceClassification',
'FunnelForTokenClassification',
'FunnelModel',
'FunnelPreTrainedModel',
'load_tf_weights_in_funnel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Any = [
'TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFFunnelBaseModel',
'TFFunnelForMaskedLM',
'TFFunnelForMultipleChoice',
'TFFunnelForPreTraining',
'TFFunnelForQuestionAnswering',
'TFFunnelForSequenceClassification',
'TFFunnelForTokenClassification',
'TFFunnelModel',
'TFFunnelPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
__UpperCamelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self : List[str] , __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : str = data
UpperCAmelCase_ : List[Any] = [0X67_45_23_01, 0Xef_cd_ab_89, 0X98_ba_dc_fe, 0X10_32_54_76, 0Xc3_d2_e1_f0]
@staticmethod
def _lowerCamelCase ( __snake_case : Dict , __snake_case : Dict ):
'''simple docstring'''
return ((n << b) | (n >> (32 - b))) & 0Xff_ff_ff_ff
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = B'''\x80''' + B'''\x00''' * (63 - (len(self.data ) + 8) % 64)
UpperCAmelCase_ : Union[str, Any] = self.data + padding + struct.pack('''>Q''' , 8 * len(self.data ) )
return padded_data
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return [
self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 )
]
def _lowerCamelCase ( self : Dict , __snake_case : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Any = list(struct.unpack('''>16L''' , __snake_case ) ) + [0] * 64
for i in range(16 , 80 ):
UpperCAmelCase_ : str = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 )
return w
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = self.padding()
UpperCAmelCase_ : str = self.split_blocks()
for block in self.blocks:
UpperCAmelCase_ : Any = self.expand_block(__snake_case )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.h
for i in range(0 , 80 ):
if 0 <= i < 20:
UpperCAmelCase_ : Optional[Any] = (b & c) | ((~b) & d)
UpperCAmelCase_ : Optional[Any] = 0X5a_82_79_99
elif 20 <= i < 40:
UpperCAmelCase_ : List[Any] = b ^ c ^ d
UpperCAmelCase_ : str = 0X6e_d9_eb_a1
elif 40 <= i < 60:
UpperCAmelCase_ : str = (b & c) | (b & d) | (c & d)
UpperCAmelCase_ : Optional[int] = 0X8f_1b_bc_dc
elif 60 <= i < 80:
UpperCAmelCase_ : Union[str, Any] = b ^ c ^ d
UpperCAmelCase_ : Dict = 0Xca_62_c1_d6
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = (
self.rotate(__snake_case , 5 ) + f + e + k + expanded_block[i] & 0Xff_ff_ff_ff,
a,
self.rotate(__snake_case , 30 ),
c,
d,
)
UpperCAmelCase_ : Optional[Any] = (
self.h[0] + a & 0Xff_ff_ff_ff,
self.h[1] + b & 0Xff_ff_ff_ff,
self.h[2] + c & 0Xff_ff_ff_ff,
self.h[3] + d & 0Xff_ff_ff_ff,
self.h[4] + e & 0Xff_ff_ff_ff,
)
return ("{:08x}" * 5).format(*self.h )
def snake_case_ ( ):
UpperCAmelCase_ : Tuple = B'''Test String'''
assert SHAaHash(__lowercase ).final_hash() == hashlib.shaa(__lowercase ).hexdigest() # noqa: S324
def snake_case_ ( ):
UpperCAmelCase_ : int = argparse.ArgumentParser(description='''Process some strings or files''' )
parser.add_argument(
'''--string''' , dest='''input_string''' , default='''Hello World!! Welcome to Cryptography''' , help='''Hash the string''' , )
parser.add_argument('''--file''' , dest='''input_file''' , help='''Hash contents of a file''' )
UpperCAmelCase_ : List[Any] = parser.parse_args()
UpperCAmelCase_ : Optional[Any] = args.input_string
    # In any case, the hash input should be a bytestring
if args.input_file:
with open(args.input_file , '''rb''' ) as f:
UpperCAmelCase_ : List[str] = f.read()
else:
UpperCAmelCase_ : Tuple = bytes(__lowercase , '''utf-8''' )
print(SHAaHash(__lowercase ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
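# A standalone restatement of the 32-bit left rotation used in every round
# above (the static rotate helper), for illustration:
def _left_rotate(n: int, b: int) -> int:
    return ((n << b) | (n >> (32 - b))) & 0XFF_FF_FF_FF


assert _left_rotate(0X80_00_00_00, 1) == 1  # the MSB wraps around to the LSB
assert _left_rotate(1, 31) == 0X80_00_00_00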
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : Any = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(__lowercase , __lowercase )
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : Union[str, Any] = list(s_dict.keys() )
for key in keys:
if "transformer_layers" in key:
UpperCAmelCase_ : Union[str, Any] = s_dict.pop(__lowercase )
elif "subsample" in key:
UpperCAmelCase_ : List[Any] = s_dict.pop(__lowercase )
def snake_case_ ( __lowercase ):
UpperCAmelCase_ , UpperCAmelCase_ : Dict = emb.weight.shape
UpperCAmelCase_ : Union[str, Any] = nn.Linear(__lowercase , __lowercase , bias=__lowercase )
UpperCAmelCase_ : List[Any] = emb.weight.data
return lin_layer
def snake_case_ ( __lowercase , __lowercase ):
UpperCAmelCase_ : str = torch.load(__lowercase , map_location='''cpu''' )
UpperCAmelCase_ : int = mam_aaa['''args''']
UpperCAmelCase_ : Dict = mam_aaa['''model''']
UpperCAmelCase_ : Optional[Any] = state_dict['''decoder.output_projection.weight''']
remove_ignore_keys_(__lowercase )
rename_keys(__lowercase )
UpperCAmelCase_ : int = state_dict['''decoder.embed_tokens.weight'''].shape[0]
UpperCAmelCase_ : Optional[Any] = args.share_decoder_input_output_embed
UpperCAmelCase_ : str = [int(__lowercase ) for i in args.conv_kernel_sizes.split(''',''' )]
UpperCAmelCase_ : List[Any] = SpeechaTextConfig(
vocab_size=__lowercase , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , num_conv_layers=len(__lowercase ) , conv_channels=args.conv_channels , conv_kernel_sizes=__lowercase , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=__lowercase , num_beams=5 , max_length=2_0_0 , use_cache=__lowercase , decoder_start_token_id=2 , early_stopping=__lowercase , )
UpperCAmelCase_ : Tuple = SpeechaTextForConditionalGeneration(__lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = model.model.load_state_dict(__lowercase , strict=__lowercase )
if len(__lowercase ) > 0 and not set(__lowercase ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
'''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'''
F''' but all the following weights are missing {missing}''' )
if tie_embeds:
UpperCAmelCase_ : List[str] = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
UpperCAmelCase_ : List[Any] = lm_head_weights
model.save_pretrained(__lowercase )
if __name__ == "__main__":
__UpperCamelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
__UpperCamelCase : Tuple = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
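# A hypothetical invocation of the conversion script above; both paths are
# placeholders, not real files:
#
#   python convert_s2t_checkpoint.py \
#       --fairseq_path ./s2t_small_librispeech.pt \
#       --pytorch_dump_folder_path ./s2t_converted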
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : int = logging.get_logger(__name__)
__UpperCamelCase : Union[str, Any] = {
'facebook/timesformer': 'https://huggingface.co/facebook/timesformer/resolve/main/config.json',
}
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : str = 'timesformer'
def __init__( self : int , __snake_case : Any=224 , __snake_case : str=16 , __snake_case : Any=3 , __snake_case : List[Any]=8 , __snake_case : Dict=768 , __snake_case : Dict=12 , __snake_case : Tuple=12 , __snake_case : Dict=3_072 , __snake_case : str="gelu" , __snake_case : Union[str, Any]=0.0 , __snake_case : List[Any]=0.0 , __snake_case : Tuple=0.02 , __snake_case : Optional[Any]=1E-6 , __snake_case : List[Any]=True , __snake_case : List[str]="divided_space_time" , __snake_case : Optional[int]=0 , **__snake_case : Dict , ):
'''simple docstring'''
super().__init__(**__snake_case )
UpperCAmelCase_ : Optional[int] = image_size
UpperCAmelCase_ : Optional[Any] = patch_size
UpperCAmelCase_ : Any = num_channels
UpperCAmelCase_ : int = num_frames
UpperCAmelCase_ : List[Any] = hidden_size
UpperCAmelCase_ : Any = num_hidden_layers
UpperCAmelCase_ : Dict = num_attention_heads
UpperCAmelCase_ : List[Any] = intermediate_size
UpperCAmelCase_ : str = hidden_act
UpperCAmelCase_ : List[str] = hidden_dropout_prob
UpperCAmelCase_ : List[str] = attention_probs_dropout_prob
UpperCAmelCase_ : Optional[int] = initializer_range
UpperCAmelCase_ : List[str] = layer_norm_eps
UpperCAmelCase_ : str = qkv_bias
UpperCAmelCase_ : Dict = attention_type
UpperCAmelCase_ : str = drop_path_rate
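# A hedged usage note; upstream this class is transformers.TimesformerConfig.
# The default attention_type "divided_space_time" selects TimeSformer's
# factorized attention (temporal attention across frames, then spatial
# attention within each frame), which scales better than joint space-time
# attention:
#
#   cfg = TimesformerConfig(num_frames=8, attention_type="divided_space_time")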
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
__UpperCamelCase : List[str] = None
__UpperCamelCase : Dict = logging.get_logger(__name__)
__UpperCamelCase : List[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__UpperCamelCase : Dict = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
},
'tokenizer_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json',
},
}
__UpperCamelCase : str = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
__UpperCamelCase : Optional[int] = '▁'
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : List[str] = VOCAB_FILES_NAMES
A_ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
A_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ : Dict = AlbertTokenizer
def __init__( self : List[str] , __snake_case : Dict=None , __snake_case : List[Any]=None , __snake_case : Dict=True , __snake_case : Tuple=True , __snake_case : List[Any]=False , __snake_case : Union[str, Any]="[CLS]" , __snake_case : Any="[SEP]" , __snake_case : Optional[int]="<unk>" , __snake_case : Any="[SEP]" , __snake_case : int="<pad>" , __snake_case : Dict="[CLS]" , __snake_case : str="[MASK]" , **__snake_case : List[Any] , ):
'''simple docstring'''
        # The mask token behaves like a normal word, i.e. it includes the space before it and
        # is included in the raw text, so there should be a match in a non-normalized sentence.
UpperCAmelCase_ : List[str] = (
AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case , normalized=__snake_case )
if isinstance(__snake_case , __snake_case )
else mask_token
)
super().__init__(
__snake_case , tokenizer_file=__snake_case , do_lower_case=__snake_case , remove_space=__snake_case , keep_accents=__snake_case , bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , sep_token=__snake_case , pad_token=__snake_case , cls_token=__snake_case , mask_token=__snake_case , **__snake_case , )
UpperCAmelCase_ : Union[str, Any] = do_lower_case
UpperCAmelCase_ : Any = remove_space
UpperCAmelCase_ : Optional[int] = keep_accents
UpperCAmelCase_ : str = vocab_file
UpperCAmelCase_ : Tuple = False if not self.vocab_file else True
def _lowerCamelCase ( self : Optional[int] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase_ : int = [self.sep_token_id]
UpperCAmelCase_ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _lowerCamelCase ( self : List[Any] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase_ : Any = [self.sep_token_id]
UpperCAmelCase_ : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowerCamelCase ( self : List[Any] , __snake_case : str , __snake_case : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(__snake_case ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase_ : Optional[int] = os.path.join(
__snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__snake_case ):
copyfile(self.vocab_file , __snake_case )
return (out_vocab_file,)
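# Illustrative token layouts produced by the special-token helpers above
# (IDs elided); this is the standard ALBERT/BERT sentence-pair scheme:
#
#   single sequence:  [CLS] A [SEP]          -> token_type_ids: all 0
#   sequence pair:    [CLS] A [SEP] B [SEP]  -> 0 over "[CLS] A [SEP]",
#                                               1 over "B [SEP]"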
import math
import qiskit
def snake_case_ ( __lowercase = 1 , __lowercase = 1 , __lowercase = 1 ):
if (
        isinstance(__lowercase , str )
        or isinstance(__lowercase , str )
        or isinstance(__lowercase , str )
):
raise TypeError('''inputs must be integers.''' )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError('''inputs must be positive.''' )
if (
(math.floor(__lowercase ) != input_a)
or (math.floor(__lowercase ) != input_a)
or (math.floor(__lowercase ) != carry_in)
):
raise ValueError('''inputs must be exact integers.''' )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
raise ValueError('''inputs must be less or equal to 2.''' )
# build registers
UpperCAmelCase_ : Any = qiskit.QuantumRegister(4 , '''qr''' )
UpperCAmelCase_ : List[str] = qiskit.ClassicalRegister(2 , '''cr''' )
# list the entries
UpperCAmelCase_ : Any = [input_a, input_a, carry_in]
UpperCAmelCase_ : Dict = qiskit.QuantumCircuit(__lowercase , __lowercase )
for i in range(0 , 3 ):
if entry[i] == 2:
quantum_circuit.h(__lowercase ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(__lowercase ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(__lowercase ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
quantum_circuit.measure([2, 3] , __lowercase ) # measure the last two qbits
UpperCAmelCase_ : Optional[int] = qiskit.Aer.get_backend('''aer_simulator''' )
UpperCAmelCase_ : List[str] = qiskit.execute(__lowercase , __lowercase , shots=1_0_0_0 )
return job.result().get_counts(__lowercase )
if __name__ == "__main__":
print(F'Total sum count for state is: {quantum_full_adder(1, 1, 1)}')
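# A classical reference full adder for comparison with the measured counts
# above, matching the bit order of the reported count strings (carry-out bit
# first, then the sum bit):
def _classical_full_adder(a: int, b: int, carry_in: int) -> tuple:
    total = a + b + carry_in
    return total >> 1, total & 1  # (carry_out, sum)


assert _classical_full_adder(1, 1, 1) == (1, 1)  # 1 + 1 + 1 == 0b11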
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
__UpperCamelCase : Dict = logging.get_logger(__name__)
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self : Optional[Any] , __snake_case : Union[str, Any] , __snake_case : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = question_encoder
UpperCAmelCase_ : Dict = generator
UpperCAmelCase_ : List[Any] = self.question_encoder
def _lowerCamelCase ( self : Optional[int] , __snake_case : int ):
'''simple docstring'''
if os.path.isfile(__snake_case ):
raise ValueError(f'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(__snake_case , exist_ok=__snake_case )
UpperCAmelCase_ : Dict = os.path.join(__snake_case , '''question_encoder_tokenizer''' )
UpperCAmelCase_ : Any = os.path.join(__snake_case , '''generator_tokenizer''' )
self.question_encoder.save_pretrained(__snake_case )
self.generator.save_pretrained(__snake_case )
@classmethod
def _lowerCamelCase ( cls : List[Any] , __snake_case : Optional[int] , **__snake_case : int ):
'''simple docstring'''
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
UpperCAmelCase_ : str = kwargs.pop('''config''' , __snake_case )
if config is None:
UpperCAmelCase_ : Tuple = RagConfig.from_pretrained(__snake_case )
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(
__snake_case , config=config.question_encoder , subfolder='''question_encoder_tokenizer''' )
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(
__snake_case , config=config.generator , subfolder='''generator_tokenizer''' )
return cls(question_encoder=__snake_case , generator=__snake_case )
def __call__( self : Optional[int] , *__snake_case : List[Any] , **__snake_case : int ):
'''simple docstring'''
return self.current_tokenizer(*__snake_case , **__snake_case )
def _lowerCamelCase ( self : int , *__snake_case : str , **__snake_case : List[Any] ):
'''simple docstring'''
return self.generator.batch_decode(*__snake_case , **__snake_case )
def _lowerCamelCase ( self : Optional[Any] , *__snake_case : List[str] , **__snake_case : Optional[Any] ):
'''simple docstring'''
return self.generator.decode(*__snake_case , **__snake_case )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : int = self.question_encoder
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = self.generator
def _lowerCamelCase ( self : List[Any] , __snake_case : List[str] , __snake_case : Optional[List[str]] = None , __snake_case : Optional[int] = None , __snake_case : Optional[int] = None , __snake_case : str = "longest" , __snake_case : str = None , __snake_case : bool = True , **__snake_case : Any , ):
'''simple docstring'''
warnings.warn(
'''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '''
'''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '''
'''context manager to prepare your targets. See the documentation of your specific tokenizer for more '''
'''details''' , __snake_case , )
if max_length is None:
UpperCAmelCase_ : Optional[int] = self.current_tokenizer.model_max_length
UpperCAmelCase_ : Any = self(
__snake_case , add_special_tokens=__snake_case , return_tensors=__snake_case , max_length=__snake_case , padding=__snake_case , truncation=__snake_case , **__snake_case , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
UpperCAmelCase_ : Optional[Any] = self.current_tokenizer.model_max_length
UpperCAmelCase_ : int = self(
text_target=__snake_case , add_special_tokens=__snake_case , return_tensors=__snake_case , padding=__snake_case , max_length=__snake_case , truncation=__snake_case , **__snake_case , )
UpperCAmelCase_ : List[str] = labels['''input_ids''']
return model_inputs
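# A hedged usage sketch; upstream this class is transformers.RagTokenizer,
# which pairs a question-encoder tokenizer with a generator tokenizer:
#
#   tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-base")
#   batch = tokenizer(["who wrote hamlet?"], return_tensors="pt")  # question encoder side
#   text = tokenizer.batch_decode(generated_ids)                   # generator side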
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Any = logging.get_logger(__name__)
__UpperCamelCase : Dict = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : List[str] = 'gpt_bigcode'
A_ : Optional[Any] = ['past_key_values']
A_ : Optional[int] = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : int , __snake_case : Dict=50_257 , __snake_case : List[str]=1_024 , __snake_case : Dict=768 , __snake_case : Optional[int]=12 , __snake_case : str=12 , __snake_case : List[str]=None , __snake_case : List[str]="gelu_pytorch_tanh" , __snake_case : Optional[Any]=0.1 , __snake_case : str=0.1 , __snake_case : Optional[Any]=0.1 , __snake_case : List[str]=1E-5 , __snake_case : Dict=0.02 , __snake_case : Optional[int]=True , __snake_case : Tuple=True , __snake_case : Optional[Any]=50_256 , __snake_case : Optional[Any]=50_256 , __snake_case : Optional[Any]=True , __snake_case : Optional[Any]=True , __snake_case : List[Any]=True , **__snake_case : List[str] , ):
'''simple docstring'''
UpperCAmelCase_ : Any = vocab_size
UpperCAmelCase_ : int = n_positions
UpperCAmelCase_ : Any = n_embd
UpperCAmelCase_ : Union[str, Any] = n_layer
UpperCAmelCase_ : List[str] = n_head
UpperCAmelCase_ : List[Any] = n_inner
UpperCAmelCase_ : Optional[int] = activation_function
UpperCAmelCase_ : str = resid_pdrop
UpperCAmelCase_ : Optional[Any] = embd_pdrop
UpperCAmelCase_ : str = attn_pdrop
UpperCAmelCase_ : Any = layer_norm_epsilon
UpperCAmelCase_ : List[str] = initializer_range
UpperCAmelCase_ : Optional[int] = scale_attn_weights
UpperCAmelCase_ : Union[str, Any] = use_cache
UpperCAmelCase_ : Dict = attention_softmax_in_fpaa
UpperCAmelCase_ : Union[str, Any] = scale_attention_softmax_in_fpaa
UpperCAmelCase_ : Optional[int] = multi_query
UpperCAmelCase_ : Optional[Any] = bos_token_id
UpperCAmelCase_ : Tuple = eos_token_id
super().__init__(bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
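# A hedged note; upstream this class is transformers.GPTBigCodeConfig. The
# multi_query=True default selects multi-query attention (a single shared
# key/value head across all query heads), the main architectural difference
# from GPT-2 and what keeps SantaCoder-style batched inference cheap:
#
#   cfg = GPTBigCodeConfig(multi_query=True)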
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
__UpperCamelCase : Union[str, Any] = '0.12' # assumed parallelism: 8
@require_flax
@is_staging_test
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@classmethod
def _lowerCamelCase ( cls : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : str = TOKEN
HfFolder.save_token(__snake_case )
@classmethod
def _lowerCamelCase ( cls : Optional[int] ):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='''test-model-flax''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' )
except HTTPError:
pass
def _lowerCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
UpperCAmelCase_ : Any = FlaxBertModel(__snake_case )
model.push_to_hub('''test-model-flax''' , use_auth_token=self._token )
UpperCAmelCase_ : Optional[int] = FlaxBertModel.from_pretrained(f'''{USER}/test-model-flax''' )
UpperCAmelCase_ : str = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase_ : List[str] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase_ : List[str] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__snake_case , 1E-3 , msg=f'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id='''test-model-flax''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(__snake_case , repo_id='''test-model-flax''' , push_to_hub=__snake_case , use_auth_token=self._token )
UpperCAmelCase_ : Tuple = FlaxBertModel.from_pretrained(f'''{USER}/test-model-flax''' )
UpperCAmelCase_ : Dict = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase_ : str = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase_ : Dict = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__snake_case , 1E-3 , msg=f'''{key} not identical''' )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
UpperCAmelCase_ : int = FlaxBertModel(__snake_case )
model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token )
UpperCAmelCase_ : str = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
UpperCAmelCase_ : Any = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase_ : str = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase_ : Tuple = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__snake_case , 1E-3 , msg=f'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
__snake_case , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=__snake_case , use_auth_token=self._token )
UpperCAmelCase_ : str = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
UpperCAmelCase_ : int = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase_ : List[str] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase_ : Optional[Any] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__snake_case , 1E-3 , msg=f'''{key} not identical''' )
def snake_case_ ( __lowercase , __lowercase ):
UpperCAmelCase_ : Any = True
UpperCAmelCase_ : Union[str, Any] = flatten_dict(modela.params )
UpperCAmelCase_ : Union[str, Any] = flatten_dict(modela.params )
for key in flat_params_a.keys():
if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1e-4:
UpperCAmelCase_ : List[str] = False
return models_are_equal
@require_flax
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : Dict = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
UpperCAmelCase_ : Any = FlaxBertModel(__snake_case )
UpperCAmelCase_ : Tuple = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(__snake_case , __snake_case ) )
with self.assertRaises(__snake_case ):
UpperCAmelCase_ : int = FlaxBertModel.from_pretrained(__snake_case )
UpperCAmelCase_ : str = FlaxBertModel.from_pretrained(__snake_case , subfolder=__snake_case )
self.assertTrue(check_models_equal(__snake_case , __snake_case ) )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
UpperCAmelCase_ : List[Any] = FlaxBertModel(__snake_case )
UpperCAmelCase_ : List[str] = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(__snake_case , __snake_case ) , max_shard_size='''10KB''' )
with self.assertRaises(__snake_case ):
UpperCAmelCase_ : str = FlaxBertModel.from_pretrained(__snake_case )
UpperCAmelCase_ : Union[str, Any] = FlaxBertModel.from_pretrained(__snake_case , subfolder=__snake_case )
self.assertTrue(check_models_equal(__snake_case , __snake_case ) )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = '''bert'''
UpperCAmelCase_ : Tuple = '''hf-internal-testing/tiny-random-bert-subfolder'''
with self.assertRaises(__snake_case ):
UpperCAmelCase_ : Dict = FlaxBertModel.from_pretrained(__snake_case )
UpperCAmelCase_ : Optional[Any] = FlaxBertModel.from_pretrained(__snake_case , subfolder=__snake_case )
self.assertIsNotNone(__snake_case )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = '''bert'''
UpperCAmelCase_ : Any = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
with self.assertRaises(__snake_case ):
UpperCAmelCase_ : List[str] = FlaxBertModel.from_pretrained(__snake_case )
UpperCAmelCase_ : Any = FlaxBertModel.from_pretrained(__snake_case , subfolder=__snake_case )
self.assertIsNotNone(__snake_case )
import fire
from utils import calculate_rouge, save_json
def snake_case_ ( __lowercase , __lowercase , __lowercase=None , **__lowercase ):
UpperCAmelCase_ : Tuple = [x.strip() for x in open(__lowercase ).readlines()]
UpperCAmelCase_ : Dict = [x.strip() for x in open(__lowercase ).readlines()][: len(__lowercase )]
UpperCAmelCase_ : int = calculate_rouge(__lowercase , __lowercase , **__lowercase )
if save_path is not None:
save_json(__lowercase , __lowercase , indent=__lowercase )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
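# Illustrative CLI usage via `fire` (the script and file names are assumptions):
#   python rouge_cli.py predictions.txt references.txt --save_path=rouge.json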
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
__UpperCamelCase : List[str] = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] , __snake_case : str ):
'''simple docstring'''
super().__init__()
        UpperCAmelCase_ : List[str] = torchvision.models.resnet152(pretrained=True )
UpperCAmelCase_ : Union[str, Any] = list(model.children() )[:-2]
UpperCAmelCase_ : Optional[Any] = nn.Sequential(*__snake_case )
        UpperCAmelCase_ : Tuple = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds] )
def _lowerCamelCase ( self : Optional[int] , __snake_case : List[str] ):
'''simple docstring'''
# Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
UpperCAmelCase_ : List[Any] = self.pool(self.model(__snake_case ) )
UpperCAmelCase_ : Dict = torch.flatten(__snake_case , start_dim=2 )
UpperCAmelCase_ : List[Any] = out.transpose(1 , 2 ).contiguous()
return out # BxNx2048
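# Illustrative usage of the encoder above (assumes an `args` namespace with
# `num_image_embeds=3`, so POOLING_BREAKDOWN yields a (3, 1) pooling grid):
#   encoder = ImageEncoder(args)
#   feats = encoder(torch.randn(2, 3, 224, 224))  # -> shape (2, 3, 2048)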
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
def __init__( self : Optional[Any] , __snake_case : Optional[int] , __snake_case : Tuple , __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : Any ):
'''simple docstring'''
        UpperCAmelCase_ : Tuple = [json.loads(l ) for l in open(__snake_case )]
UpperCAmelCase_ : List[Any] = os.path.dirname(__snake_case )
UpperCAmelCase_ : int = tokenizer
UpperCAmelCase_ : str = labels
UpperCAmelCase_ : int = len(__snake_case )
UpperCAmelCase_ : List[str] = max_seq_length
UpperCAmelCase_ : List[Any] = transforms
def __len__( self : Any ):
'''simple docstring'''
return len(self.data )
def __getitem__( self : Dict , __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Dict = torch.LongTensor(self.tokenizer.encode(self.data[index]['''text'''] , add_special_tokens=__snake_case ) )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = sentence[0], sentence[1:-1], sentence[-1]
UpperCAmelCase_ : int = sentence[: self.max_seq_length]
UpperCAmelCase_ : int = torch.zeros(self.n_classes )
        label[[self.labels.index(tgt ) for tgt in self.data[index]['''label''']]] = 1
UpperCAmelCase_ : Any = Image.open(os.path.join(self.data_dir , self.data[index]['''img'''] ) ).convert('''RGB''' )
UpperCAmelCase_ : Optional[Any] = self.transforms(__snake_case )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = Counter()
for row in self.data:
label_freqs.update(row['''label'''] )
return label_freqs
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : Optional[int] = [len(row['''sentence'''] ) for row in batch]
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = len(__lowercase ), max(__lowercase )
UpperCAmelCase_ : List[Any] = torch.zeros(__lowercase , __lowercase , dtype=torch.long )
UpperCAmelCase_ : Tuple = torch.zeros(__lowercase , __lowercase , dtype=torch.long )
for i_batch, (input_row, length) in enumerate(zip(__lowercase , __lowercase ) ):
        text_tensor[i_batch, :length] = input_row['''sentence''']
        mask_tensor[i_batch, :length] = 1
UpperCAmelCase_ : str = torch.stack([row['''image'''] for row in batch] )
UpperCAmelCase_ : Any = torch.stack([row['''label'''] for row in batch] )
UpperCAmelCase_ : Optional[Any] = torch.stack([row['''image_start_token'''] for row in batch] )
UpperCAmelCase_ : int = torch.stack([row['''image_end_token'''] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
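# Minimal sketch wiring the collate function above into a DataLoader (the
# `dataset` and `collate_fn` names are assumptions):
#   loader = torch.utils.data.DataLoader(dataset, batch_size=8, collate_fn=collate_fn)
#   text, mask, img, img_start, img_end, tgt = next(iter(loader))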
def snake_case_ ( ):
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def snake_case_ ( ):
    return transforms.Compose(
        [
            transforms.Resize(256 ),
            transforms.CenterCrop(224 ),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.46777044, 0.44531429, 0.40661017] , std=[0.12221994, 0.12145835, 0.14380469] , ),
        ] )
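# Hedged usage sketch for the transform pipeline above (the image path is an
# assumption):
#   transform = get_image_transforms()
#   tensor = transform(Image.open("poster.jpg").convert("RGB"))  # -> (3, 224, 224)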
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self : int , __snake_case : List[Any] , __snake_case : List[Any]=13 , __snake_case : str=7 , __snake_case : Union[str, Any]=True , __snake_case : List[Any]=True , __snake_case : str=True , __snake_case : Optional[int]=True , __snake_case : Optional[int]=True , __snake_case : List[str]=False , __snake_case : List[str]=False , __snake_case : Tuple=False , __snake_case : List[str]=2 , __snake_case : Optional[int]=99 , __snake_case : Tuple=0 , __snake_case : int=32 , __snake_case : Optional[int]=5 , __snake_case : str=4 , __snake_case : str=0.1 , __snake_case : Optional[int]=0.1 , __snake_case : List[str]=512 , __snake_case : Tuple=2 , __snake_case : List[Any]=0.02 , __snake_case : Any=2 , __snake_case : Optional[int]=4 , __snake_case : Optional[Any]="last" , __snake_case : Dict=True , __snake_case : Any=None , __snake_case : str=0 , ):
'''simple docstring'''
UpperCAmelCase_ : int = parent
UpperCAmelCase_ : Optional[Any] = batch_size
UpperCAmelCase_ : Union[str, Any] = seq_length
UpperCAmelCase_ : List[Any] = is_training
UpperCAmelCase_ : List[Any] = use_input_lengths
UpperCAmelCase_ : Dict = use_token_type_ids
UpperCAmelCase_ : Union[str, Any] = use_labels
UpperCAmelCase_ : int = gelu_activation
UpperCAmelCase_ : str = sinusoidal_embeddings
UpperCAmelCase_ : List[str] = causal
UpperCAmelCase_ : Tuple = asm
UpperCAmelCase_ : List[Any] = n_langs
UpperCAmelCase_ : Union[str, Any] = vocab_size
UpperCAmelCase_ : Any = n_special
UpperCAmelCase_ : Dict = hidden_size
UpperCAmelCase_ : Dict = num_hidden_layers
UpperCAmelCase_ : str = num_attention_heads
UpperCAmelCase_ : List[str] = hidden_dropout_prob
UpperCAmelCase_ : Any = attention_probs_dropout_prob
UpperCAmelCase_ : Tuple = max_position_embeddings
UpperCAmelCase_ : Union[str, Any] = type_sequence_label_size
UpperCAmelCase_ : Union[str, Any] = initializer_range
UpperCAmelCase_ : Tuple = num_labels
UpperCAmelCase_ : List[Any] = num_choices
UpperCAmelCase_ : Any = summary_type
UpperCAmelCase_ : Optional[int] = use_proj
UpperCAmelCase_ : List[str] = scope
UpperCAmelCase_ : List[str] = bos_token_id
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : Tuple = None
if self.use_input_lengths:
UpperCAmelCase_ : List[str] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
UpperCAmelCase_ : int = None
if self.use_token_type_ids:
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : str = None
if self.use_labels:
UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ : List[Any] = ids_tensor([self.batch_size] , 2 ).float()
UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ : List[Any] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def _lowerCamelCase ( self : Optional[Any] , __snake_case : int , __snake_case : int , __snake_case : List[Any] , __snake_case : List[Any] , __snake_case : List[str] , __snake_case : List[str] , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Dict , ):
'''simple docstring'''
UpperCAmelCase_ : Any = XLMModel(config=__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Optional[int] = model(__snake_case , lengths=__snake_case , langs=__snake_case )
UpperCAmelCase_ : Any = model(__snake_case , langs=__snake_case )
UpperCAmelCase_ : Any = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self : str , __snake_case : Dict , __snake_case : Any , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : str , __snake_case : Tuple , __snake_case : Union[str, Any] , __snake_case : int , ):
'''simple docstring'''
UpperCAmelCase_ : Any = XLMWithLMHeadModel(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self : Optional[int] , __snake_case : Tuple , __snake_case : Any , __snake_case : Tuple , __snake_case : List[str] , __snake_case : Optional[int] , __snake_case : List[str] , __snake_case : Optional[Any] , __snake_case : str , __snake_case : List[str] , ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = XLMForQuestionAnsweringSimple(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Optional[int] = model(__snake_case )
UpperCAmelCase_ : Any = model(__snake_case , start_positions=__snake_case , end_positions=__snake_case )
UpperCAmelCase_ : Optional[Any] = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self : Any , __snake_case : int , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : Tuple , __snake_case : str , __snake_case : Tuple , __snake_case : int , ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = XLMForQuestionAnswering(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(__snake_case )
UpperCAmelCase_ : List[str] = model(
__snake_case , start_positions=__snake_case , end_positions=__snake_case , cls_index=__snake_case , is_impossible=__snake_case , p_mask=__snake_case , )
UpperCAmelCase_ : Optional[Any] = model(
__snake_case , start_positions=__snake_case , end_positions=__snake_case , cls_index=__snake_case , is_impossible=__snake_case , )
((UpperCAmelCase_) , ) : Union[str, Any] = result_with_labels.to_tuple()
UpperCAmelCase_ : Optional[int] = model(__snake_case , start_positions=__snake_case , end_positions=__snake_case )
((UpperCAmelCase_) , ) : str = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def _lowerCamelCase ( self : str , __snake_case : Dict , __snake_case : str , __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : Optional[int] , __snake_case : Dict , __snake_case : List[str] , __snake_case : Optional[Any] , __snake_case : Any , ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = XLMForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(__snake_case )
UpperCAmelCase_ : Optional[int] = model(__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowerCamelCase ( self : Any , __snake_case : Union[str, Any] , __snake_case : Any , __snake_case : List[str] , __snake_case : Optional[Any] , __snake_case : Tuple , __snake_case : str , __snake_case : Optional[int] , __snake_case : Tuple , __snake_case : int , ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = self.num_labels
UpperCAmelCase_ : Optional[int] = XLMForTokenClassification(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : List[str] = model(__snake_case , attention_mask=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self : List[Any] , __snake_case : str , __snake_case : List[str] , __snake_case : Any , __snake_case : Tuple , __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : Dict , __snake_case : Optional[int] , ):
'''simple docstring'''
UpperCAmelCase_ : int = self.num_choices
UpperCAmelCase_ : int = XLMForMultipleChoice(config=__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ : Any = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
UpperCAmelCase_ : str = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths}
return config, inputs_dict
@require_torch
class lowerCAmelCase__( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase ):
'''simple docstring'''
A_ : Dict = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
A_ : List[str] = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
A_ : Optional[int] = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def _lowerCamelCase ( self : str , __snake_case : Optional[int] , __snake_case : Optional[Any] , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Optional[Any] ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _lowerCamelCase ( self : Union[str, Any] , __snake_case : Any , __snake_case : List[Any] , __snake_case : str=False ):
'''simple docstring'''
UpperCAmelCase_ : int = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
UpperCAmelCase_ : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case )
UpperCAmelCase_ : Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case )
return inputs_dict
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = XLMModelTester(self )
UpperCAmelCase_ : List[Any] = ConfigTester(self , config_class=__snake_case , emb_dim=37 )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*__snake_case )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*__snake_case )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*__snake_case )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*__snake_case )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*__snake_case )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*__snake_case )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*__snake_case )
def _lowerCamelCase ( self : str , __snake_case : str , __snake_case : Union[str, Any] , __snake_case : List[Any] , __snake_case : str , __snake_case : Optional[int] , __snake_case : Union[str, Any]=False , __snake_case : Optional[Any]=1 ):
'''simple docstring'''
self.assertIsInstance(__snake_case , __snake_case )
self.assertListEqual(
[isinstance(__snake_case , __snake_case ) for iter_attentions in attentions] , [True] * len(__snake_case ) )
self.assertEqual(len(__snake_case ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(__snake_case ):
# adds PAD dummy token
UpperCAmelCase_ : Dict = min_length + idx + 1
UpperCAmelCase_ : List[Any] = min_length + idx + 1
UpperCAmelCase_ : Optional[int] = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(__snake_case ) )
def _lowerCamelCase ( self : List[Any] , __snake_case : List[str] , __snake_case : int , __snake_case : Optional[int] , __snake_case : Dict , __snake_case : Any , __snake_case : Optional[Any]=False , __snake_case : str=1 ):
'''simple docstring'''
self.assertIsInstance(__snake_case , __snake_case )
self.assertListEqual(
[isinstance(__snake_case , __snake_case ) for iter_hidden_states in hidden_states] , [True] * len(__snake_case ) , )
self.assertEqual(len(__snake_case ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(__snake_case ):
# adds PAD dummy token
UpperCAmelCase_ : str = min_length + idx + 1
UpperCAmelCase_ : int = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(__snake_case ) , )
@slow
def _lowerCamelCase ( self : int ):
'''simple docstring'''
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase_ : Any = XLMModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = XLMWithLMHeadModel.from_pretrained('''xlm-mlm-en-2048''' )
model.to(__snake_case )
UpperCAmelCase_ : str = torch.tensor([[14, 447]] , dtype=torch.long , device=__snake_case ) # the president
UpperCAmelCase_ : Union[str, Any] = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
UpperCAmelCase_ : Dict = model.generate(__snake_case , do_sample=__snake_case )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , __snake_case )
import math
import qiskit
def snake_case_ ( __lowercase = 1 , __lowercase = 1 , __lowercase = 1 ):
    if (
        isinstance(__lowercase , str )
        or isinstance(__lowercase , str )
        or isinstance(__lowercase , str )
    ):
raise TypeError('''inputs must be integers.''' )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError('''inputs must be positive.''' )
if (
(math.floor(__lowercase ) != input_a)
or (math.floor(__lowercase ) != input_a)
or (math.floor(__lowercase ) != carry_in)
):
raise ValueError('''inputs must be exact integers.''' )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
        raise ValueError('''inputs must be less than or equal to 2.''' )
# build registers
UpperCAmelCase_ : Any = qiskit.QuantumRegister(4 , '''qr''' )
UpperCAmelCase_ : List[str] = qiskit.ClassicalRegister(2 , '''cr''' )
# list the entries
UpperCAmelCase_ : Any = [input_a, input_a, carry_in]
UpperCAmelCase_ : Dict = qiskit.QuantumCircuit(__lowercase , __lowercase )
    for i in range(0 , 3 ):
        if entry[i] == 2:
            quantum_circuit.h(i )  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i )  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i )  # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
quantum_circuit.measure([2, 3] , __lowercase ) # measure the last two qbits
    UpperCAmelCase_ : Optional[int] = qiskit.Aer.get_backend('''aer_simulator''' )
    UpperCAmelCase_ : List[str] = qiskit.execute(quantum_circuit , backend , shots=1000 )
    return job.result().get_counts(quantum_circuit )
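# The returned histogram maps 2-bit result strings to shot counts; qubit 2 holds
# the sum and qubit 3 the carry. For definite (0/1) inputs a single key should
# dominate, e.g. quantum_full_adder(1, 1, 1) -> {'11': 1000} (sum 1, carry 1).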
if __name__ == "__main__":
print(F'Total sum count for state is: {quantum_full_adder(1, 1, 1)}')
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__UpperCamelCase : Dict = R'\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `" / "`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `" // "`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `"wiki_dpr"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `"train"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `"compressed"`)\n The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and\n `"compressed"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a "dummy" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n'
@add_start_docstrings(snake_case__ )
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Union[str, Any] = 'rag'
A_ : Tuple = True
def __init__( self : int , __snake_case : List[str]=None , __snake_case : List[Any]=True , __snake_case : Optional[int]=None , __snake_case : Union[str, Any]=None , __snake_case : int=None , __snake_case : Any=None , __snake_case : Optional[int]=None , __snake_case : Optional[int]=" / " , __snake_case : Any=" // " , __snake_case : Tuple=5 , __snake_case : Union[str, Any]=300 , __snake_case : Any=768 , __snake_case : Tuple=8 , __snake_case : int="wiki_dpr" , __snake_case : Optional[int]="train" , __snake_case : Tuple="compressed" , __snake_case : Optional[int]=None , __snake_case : List[Any]=None , __snake_case : Optional[int]=False , __snake_case : str=False , __snake_case : Dict=0.0 , __snake_case : Union[str, Any]=True , __snake_case : Union[str, Any]=False , __snake_case : str=False , __snake_case : str=False , __snake_case : Optional[Any]=True , __snake_case : int=None , **__snake_case : str , ):
'''simple docstring'''
super().__init__(
bos_token_id=__snake_case , pad_token_id=__snake_case , eos_token_id=__snake_case , decoder_start_token_id=__snake_case , forced_eos_token_id=__snake_case , is_encoder_decoder=__snake_case , prefix=__snake_case , vocab_size=__snake_case , **__snake_case , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
UpperCAmelCase_ : Dict = kwargs.pop('''question_encoder''' )
UpperCAmelCase_ : List[Any] = question_encoder_config.pop('''model_type''' )
UpperCAmelCase_ : Any = kwargs.pop('''generator''' )
UpperCAmelCase_ : Union[str, Any] = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
UpperCAmelCase_ : Any = AutoConfig.for_model(__snake_case , **__snake_case )
UpperCAmelCase_ : int = AutoConfig.for_model(__snake_case , **__snake_case )
UpperCAmelCase_ : Any = reduce_loss
UpperCAmelCase_ : List[Any] = label_smoothing
UpperCAmelCase_ : Tuple = exclude_bos_score
UpperCAmelCase_ : int = do_marginalize
UpperCAmelCase_ : Tuple = title_sep
UpperCAmelCase_ : Union[str, Any] = doc_sep
UpperCAmelCase_ : Any = n_docs
UpperCAmelCase_ : Optional[int] = max_combined_length
UpperCAmelCase_ : Any = dataset
UpperCAmelCase_ : List[Any] = dataset_split
UpperCAmelCase_ : Union[str, Any] = index_name
UpperCAmelCase_ : List[str] = retrieval_vector_size
UpperCAmelCase_ : Optional[Any] = retrieval_batch_size
UpperCAmelCase_ : Optional[int] = passages_path
UpperCAmelCase_ : Optional[Any] = index_path
UpperCAmelCase_ : List[Any] = use_dummy_dataset
UpperCAmelCase_ : int = output_retrieved
UpperCAmelCase_ : int = do_deduplication
UpperCAmelCase_ : Optional[int] = use_cache
if self.forced_eos_token_id is None:
UpperCAmelCase_ : int = getattr(self.generator , '''forced_eos_token_id''' , __snake_case )
@classmethod
def _lowerCamelCase ( cls : Optional[Any] , __snake_case : PretrainedConfig , __snake_case : PretrainedConfig , **__snake_case : str ):
'''simple docstring'''
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **__snake_case )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = copy.deepcopy(self.__dict__ )
UpperCAmelCase_ : Optional[int] = self.question_encoder.to_dict()
UpperCAmelCase_ : Dict = self.generator.to_dict()
UpperCAmelCase_ : Optional[Any] = self.__class__.model_type
return output
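# Hedged usage sketch: composing the config from two sub-configs via the
# classmethod above (named `from_question_encoder_generator_configs` upstream);
# the sub-config choices below are illustrative.
#   from transformers import BartConfig, DPRConfig
#   rag_config = RagConfig.from_question_encoder_generator_configs(
#       DPRConfig(), BartConfig(), n_docs=5
#   )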
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : Tuple = len(__lowercase )
while cur > 1:
# Find the maximum number in arr
UpperCAmelCase_ : Union[str, Any] = arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
UpperCAmelCase_ : Optional[int] = arr[mi::-1] + arr[mi + 1 : len(__lowercase )]
# Reverse whole list
UpperCAmelCase_ : int = arr[cur - 1 :: -1] + arr[cur : len(__lowercase )]
cur -= 1
return arr
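# Worked example: pancake_sort([3, 1, 2])
#   cur=3: max of [3, 1, 2] at index 0 -> flip prefix -> [3, 1, 2] -> flip first 3 -> [2, 1, 3]
#   cur=2: max of [2, 1] at index 0 -> flip prefix -> [2, 1, 3] -> flip first 2 -> [1, 2, 3]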
if __name__ == "__main__":
__UpperCamelCase : Any = input('Enter numbers separated by a comma:\n').strip()
__UpperCamelCase : Tuple = [int(item) for item in user_input.split(',')]
print(pancake_sort(unsorted))
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def snake_case_ ( ):
    UpperCAmelCase_ : str = HfArgumentParser(TensorFlowBenchmarkArguments )
    UpperCAmelCase_ : Optional[Any] = parser.parse_args_into_dataclasses()[0]
    UpperCAmelCase_ : Optional[int] = TensorFlowBenchmark(args=benchmark_args )
try:
UpperCAmelCase_ : List[Any] = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
UpperCAmelCase_ : List[Any] = '''Arg --no_{0} is no longer used, please use --no-{0} instead.'''
        UpperCAmelCase_ : List[str] = ''' '''.join(str(e ).split(''' ''' )[:-1] )
        UpperCAmelCase_ : Optional[int] = ''''''
        UpperCAmelCase_ : Dict = eval(str(e ).split(''' ''' )[-1] )
UpperCAmelCase_ : int = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
                wrong_args.append(arg )
        if len(wrong_args ) > 0:
            UpperCAmelCase_ : Tuple = full_error_msg + begin_error_msg + str(wrong_args )
        raise ValueError(full_error_msg )
benchmark.run()
if __name__ == "__main__":
main()
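# Typical invocation; the flags below come from TensorFlowBenchmarkArguments and
# the values are illustrative.
#   python run_benchmark_tf.py --models bert-base-uncased --batch_sizes 8 --sequence_lengths 128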
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : str = ['image_processor', 'tokenizer']
A_ : int = 'LayoutLMv2ImageProcessor'
A_ : str = ('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast')
def __init__( self : Union[str, Any] , __snake_case : List[Any]=None , __snake_case : List[str]=None , **__snake_case : Optional[int] ):
'''simple docstring'''
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __snake_case , )
UpperCAmelCase_ : List[Any] = kwargs.pop('''feature_extractor''' )
UpperCAmelCase_ : Optional[int] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(__snake_case , __snake_case )
def __call__( self : List[str] , __snake_case : Dict , __snake_case : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __snake_case : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , __snake_case : Union[List[List[int]], List[List[List[int]]]] = None , __snake_case : Optional[Union[List[int], List[List[int]]]] = None , __snake_case : bool = True , __snake_case : Union[bool, str, PaddingStrategy] = False , __snake_case : Union[bool, str, TruncationStrategy] = None , __snake_case : Optional[int] = None , __snake_case : int = 0 , __snake_case : Optional[int] = None , __snake_case : Optional[bool] = None , __snake_case : Optional[bool] = None , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = True , __snake_case : Optional[Union[str, TensorType]] = None , **__snake_case : Optional[int] , ):
'''simple docstring'''
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes '''
'''if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
# first, apply the image processor
UpperCAmelCase_ : Tuple = self.image_processor(images=__snake_case , return_tensors=__snake_case )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(__snake_case , __snake_case ):
UpperCAmelCase_ : Optional[Any] = [text] # add batch dimension (as the image processor always adds a batch dimension)
UpperCAmelCase_ : Any = features['''words''']
UpperCAmelCase_ : str = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=__snake_case , add_special_tokens=__snake_case , padding=__snake_case , truncation=__snake_case , max_length=__snake_case , stride=__snake_case , pad_to_multiple_of=__snake_case , return_token_type_ids=__snake_case , return_attention_mask=__snake_case , return_overflowing_tokens=__snake_case , return_special_tokens_mask=__snake_case , return_offsets_mapping=__snake_case , return_length=__snake_case , verbose=__snake_case , return_tensors=__snake_case , **__snake_case , )
# add pixel values
UpperCAmelCase_ : List[str] = features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
UpperCAmelCase_ : Optional[int] = self.get_overflowing_images(__snake_case , encoded_inputs['''overflow_to_sample_mapping'''] )
UpperCAmelCase_ : List[Any] = images
return encoded_inputs
def _lowerCamelCase ( self : Union[str, Any] , __snake_case : str , __snake_case : List[Any] ):
'''simple docstring'''
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
UpperCAmelCase_ : List[str] = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(__snake_case ) != len(__snake_case ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
f''' {len(__snake_case )} and {len(__snake_case )}''' )
return images_with_overflow
def _lowerCamelCase ( self : List[Any] , *__snake_case : Optional[int] , **__snake_case : List[Any] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*__snake_case , **__snake_case )
def _lowerCamelCase ( self : str , *__snake_case : Optional[Any] , **__snake_case : Union[str, Any] ):
'''simple docstring'''
return self.tokenizer.decode(*__snake_case , **__snake_case )
@property
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __snake_case , )
return self.image_processor_class
@property
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __snake_case , )
return self.image_processor
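# Hedged usage sketch for the processor above (`LayoutXLMProcessor` upstream);
# the checkpoint and image are illustrative.
#   from PIL import Image
#   processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#   encoding = processor(Image.open("document.png").convert("RGB"), return_tensors="pt")
#   list(encoding.keys())  # input_ids, bbox, attention_mask, image, ...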
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : str = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : List[Any] = 'unispeech-sat'
def __init__( self : int , __snake_case : Optional[int]=32 , __snake_case : Dict=768 , __snake_case : Optional[Any]=12 , __snake_case : Optional[int]=12 , __snake_case : Dict=3_072 , __snake_case : List[str]="gelu" , __snake_case : Any=0.1 , __snake_case : Tuple=0.1 , __snake_case : Optional[int]=0.1 , __snake_case : Tuple=0.0 , __snake_case : List[Any]=0.0 , __snake_case : Tuple=0.1 , __snake_case : Union[str, Any]=0.1 , __snake_case : Dict=0.02 , __snake_case : Optional[Any]=1E-5 , __snake_case : Optional[int]="group" , __snake_case : str="gelu" , __snake_case : Union[str, Any]=(512, 512, 512, 512, 512, 512, 512) , __snake_case : str=(5, 2, 2, 2, 2, 2, 2) , __snake_case : Tuple=(10, 3, 3, 3, 3, 2, 2) , __snake_case : int=False , __snake_case : Optional[int]=128 , __snake_case : Any=16 , __snake_case : Union[str, Any]=False , __snake_case : Union[str, Any]=True , __snake_case : List[Any]=0.05 , __snake_case : Dict=10 , __snake_case : int=2 , __snake_case : Optional[Any]=0.0 , __snake_case : Optional[int]=10 , __snake_case : List[Any]=0 , __snake_case : Optional[int]=320 , __snake_case : int=2 , __snake_case : Any=0.1 , __snake_case : Optional[int]=100 , __snake_case : Tuple=256 , __snake_case : List[str]=256 , __snake_case : List[Any]=0.1 , __snake_case : Tuple="mean" , __snake_case : List[Any]=False , __snake_case : List[str]=False , __snake_case : Optional[Any]=256 , __snake_case : Tuple=(512, 512, 512, 512, 1_500) , __snake_case : Optional[int]=(5, 3, 3, 1, 1) , __snake_case : Any=(1, 2, 3, 1, 1) , __snake_case : int=512 , __snake_case : Optional[int]=0 , __snake_case : Dict=1 , __snake_case : Tuple=2 , __snake_case : Union[str, Any]=504 , **__snake_case : List[str] , ):
'''simple docstring'''
super().__init__(**__snake_case , pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case )
UpperCAmelCase_ : Union[str, Any] = hidden_size
UpperCAmelCase_ : int = feat_extract_norm
UpperCAmelCase_ : Dict = feat_extract_activation
UpperCAmelCase_ : Union[str, Any] = list(__snake_case )
UpperCAmelCase_ : List[str] = list(__snake_case )
UpperCAmelCase_ : Any = list(__snake_case )
UpperCAmelCase_ : Any = conv_bias
UpperCAmelCase_ : List[str] = num_conv_pos_embeddings
UpperCAmelCase_ : Dict = num_conv_pos_embedding_groups
UpperCAmelCase_ : Optional[int] = len(self.conv_dim )
UpperCAmelCase_ : List[str] = num_hidden_layers
UpperCAmelCase_ : Dict = intermediate_size
UpperCAmelCase_ : Union[str, Any] = hidden_act
UpperCAmelCase_ : str = num_attention_heads
UpperCAmelCase_ : Union[str, Any] = hidden_dropout
UpperCAmelCase_ : List[str] = attention_dropout
UpperCAmelCase_ : Optional[Any] = activation_dropout
UpperCAmelCase_ : Dict = feat_proj_dropout
UpperCAmelCase_ : Optional[Any] = final_dropout
UpperCAmelCase_ : List[Any] = layerdrop
UpperCAmelCase_ : int = layer_norm_eps
UpperCAmelCase_ : int = initializer_range
UpperCAmelCase_ : List[Any] = vocab_size
UpperCAmelCase_ : int = num_clusters
UpperCAmelCase_ : int = do_stable_layer_norm
UpperCAmelCase_ : Any = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase_ : int = apply_spec_augment
UpperCAmelCase_ : Optional[Any] = mask_time_prob
UpperCAmelCase_ : str = mask_time_length
UpperCAmelCase_ : Any = mask_time_min_masks
UpperCAmelCase_ : str = mask_feature_prob
UpperCAmelCase_ : str = mask_feature_length
UpperCAmelCase_ : Tuple = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
UpperCAmelCase_ : Optional[int] = num_codevectors_per_group
UpperCAmelCase_ : int = num_codevector_groups
UpperCAmelCase_ : List[str] = contrastive_logits_temperature
UpperCAmelCase_ : int = feat_quantizer_dropout
UpperCAmelCase_ : List[str] = num_negatives
UpperCAmelCase_ : Any = codevector_dim
UpperCAmelCase_ : Tuple = proj_codevector_dim
UpperCAmelCase_ : Union[str, Any] = diversity_loss_weight
# ctc loss
UpperCAmelCase_ : Any = ctc_loss_reduction
UpperCAmelCase_ : Optional[Any] = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCAmelCase_ : Optional[int] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase_ : Dict = list(__snake_case )
UpperCAmelCase_ : Dict = list(__snake_case )
UpperCAmelCase_ : Dict = list(__snake_case )
UpperCAmelCase_ : Union[str, Any] = xvector_output_dim
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
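# Note: the property above (named `inputs_to_logits_ratio` upstream) is the
# product of `conv_stride`; with the defaults (5, 2, 2, 2, 2, 2, 2) that is
# 5 * 2**6 = 320, i.e. one output frame per 320 input samples.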
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__UpperCamelCase : List[str] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.14.0', 'To fix: pip install -r examples/pytorch/audio-classification/requirements.txt')
def snake_case_ ( __lowercase , __lowercase , __lowercase = 16_000 ):
UpperCAmelCase_ : Optional[Any] = int(round(sample_rate * max_length ) )
if len(__lowercase ) <= sample_length:
return wav
UpperCAmelCase_ : int = randint(0 , len(__lowercase ) - sample_length - 1 )
return wav[random_offset : random_offset + sample_length]
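# Example: with sample_rate=16_000 and max_length=20.0 seconds, clips longer
# than 320_000 samples are cropped to a random 320_000-sample window; shorter
# clips are returned unchanged.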
@dataclass
class lowerCAmelCase__:
'''simple docstring'''
A_ : Optional[str] = field(default=snake_case__ , metadata={'help': 'Name of a dataset from the datasets package'} )
A_ : Optional[str] = field(
default=snake_case__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
A_ : Optional[str] = field(
default=snake_case__ , metadata={'help': 'A file containing the training audio paths and labels.'} )
A_ : Optional[str] = field(
default=snake_case__ , metadata={'help': 'A file containing the validation audio paths and labels.'} )
A_ : str = field(
default='train' , metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} , )
A_ : str = field(
default='validation' , metadata={
'help': (
'The name of the training data set split to use (via the datasets library). Defaults to \'validation\''
)
} , )
A_ : str = field(
default='audio' , metadata={'help': 'The name of the dataset column containing the audio data. Defaults to \'audio\''} , )
A_ : str = field(
default='label' , metadata={'help': 'The name of the dataset column containing the labels. Defaults to \'label\''} )
A_ : Optional[int] = field(
default=snake_case__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
A_ : Optional[int] = field(
default=snake_case__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
A_ : float = field(
default=2_0 , metadata={'help': 'Audio clips will be randomly cut to this length during training if the value is set.'} , )
@dataclass
class lowerCAmelCase__:
'''simple docstring'''
A_ : str = field(
default='facebook/wav2vec2-base' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
A_ : Optional[str] = field(
default=snake_case__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
A_ : Optional[str] = field(
default=snake_case__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from the Hub'} )
A_ : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
A_ : Optional[str] = field(
default=snake_case__ , metadata={'help': 'Name or path of preprocessor config.'} )
A_ : bool = field(
default=snake_case__ , metadata={'help': 'Whether to freeze the feature encoder layers of the model.'} )
A_ : bool = field(
default=snake_case__ , metadata={'help': 'Whether to generate an attention mask in the feature extractor.'} )
A_ : bool = field(
default=snake_case__ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
A_ : Optional[bool] = field(
default=snake_case__ , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'} )
A_ : bool = field(
default=snake_case__ , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
'''The argument `--freeze_feature_extractor` is deprecated and '''
'''will be removed in a future version. Use `--freeze_feature_encoder`'''
'''instead. Setting `freeze_feature_encoder==True`.''' , __snake_case , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
'''The argument `--freeze_feature_extractor` is deprecated and '''
'''should not be used in combination with `--freeze_feature_encoder`.'''
'''Only make use of `--freeze_feature_encoder`.''' )
def snake_case_ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCAmelCase_ : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('''run_audio_classification''' , model_args , data_args )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCAmelCase_ : Optional[Any] = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '''
        + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
UpperCAmelCase_ : List[Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCAmelCase_ : List[Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to train from scratch.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset and prepare it for the audio classification task.
UpperCAmelCase_ : Optional[int] = DatasetDict()
UpperCAmelCase_ : Union[str, Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
UpperCAmelCase_ : Optional[int] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'''--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '''
'''Make sure to set `--audio_column_name` to the correct audio column - one of '''
F'''{', '.join(raw_datasets['train'].column_names )}.''' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'''--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '''
'''Make sure to set `--label_column_name` to the correct text column - one of '''
F'''{', '.join(raw_datasets['train'].column_names )}.''' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
UpperCAmelCase_ : str = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
UpperCAmelCase_ : List[Any] = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
UpperCAmelCase_ : Optional[Any] = feature_extractor.model_input_names[0]
def train_transforms(__lowercase ):
UpperCAmelCase_ : List[Any] = []
for audio in batch[data_args.audio_column_name]:
UpperCAmelCase_ : Any = random_subsample(
audio['''array'''] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
            subsampled_wavs.append(wav )
        UpperCAmelCase_ : Optional[Any] = feature_extractor(subsampled_wavs , sampling_rate=feature_extractor.sampling_rate )
        UpperCAmelCase_ : Union[str, Any] = {model_input_name: inputs.get(model_input_name )}
UpperCAmelCase_ : Dict = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(__lowercase ):
UpperCAmelCase_ : Optional[int] = [audio['''array'''] for audio in batch[data_args.audio_column_name]]
        UpperCAmelCase_ : List[Any] = feature_extractor(wavs , sampling_rate=feature_extractor.sampling_rate )
        UpperCAmelCase_ : str = {model_input_name: inputs.get(model_input_name )}
UpperCAmelCase_ : Union[str, Any] = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
UpperCAmelCase_ : Tuple = raw_datasets['''train'''].features[data_args.label_column_name].names
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = {}, {}
for i, label in enumerate(__lowercase ):
UpperCAmelCase_ : Union[str, Any] = str(__lowercase )
UpperCAmelCase_ : Tuple = label
# Load the accuracy metric from the datasets package
UpperCAmelCase_ : Optional[Any] = evaluate.load('''accuracy''' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(__lowercase ):
UpperCAmelCase_ : int = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=__lowercase , references=eval_pred.label_ids )
UpperCAmelCase_ : Optional[Any] = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(__lowercase ) , labelaid=__lowercase , idalabel=__lowercase , finetuning_task='''audio-classification''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCAmelCase_ : List[str] = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
UpperCAmelCase_ : Tuple = (
raw_datasets['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(__lowercase , output_all_columns=__lowercase )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
UpperCAmelCase_ : List[str] = (
raw_datasets['''eval'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(__lowercase , output_all_columns=__lowercase )
# Initialize our trainer
UpperCAmelCase_ : Optional[int] = Trainer(
model=__lowercase , args=__lowercase , train_dataset=raw_datasets['''train'''] if training_args.do_train else None , eval_dataset=raw_datasets['''eval'''] if training_args.do_eval else None , compute_metrics=__lowercase , tokenizer=__lowercase , )
# Training
if training_args.do_train:
UpperCAmelCase_ : Dict = None
if training_args.resume_from_checkpoint is not None:
UpperCAmelCase_ : Optional[int] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCAmelCase_ : Dict = last_checkpoint
UpperCAmelCase_ : int = trainer.train(resume_from_checkpoint=__lowercase )
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
UpperCAmelCase_ : Any = trainer.evaluate()
trainer.log_metrics('''eval''' , __lowercase )
trainer.save_metrics('''eval''' , __lowercase )
# Write model card and (optionally) push to hub
UpperCAmelCase_ : Tuple = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''audio-classification''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''audio-classification'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**__lowercase )
else:
trainer.create_model_card(**__lowercase )
if __name__ == "__main__":
main()
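# Example invocation (illustrative: the script filename and the model/dataset choices
# are assumptions, while the flag names follow the argument fields used above):
#   python run_audio_classification.py \
#       --model_name_or_path facebook/wav2vec2-base \
#       --dataset_name superb --dataset_config_name ks \
#       --audio_column_name audio --label_column_name label \
#       --output_dir ./wav2vec2-base-ks --do_train --do_eval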
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def snake_case_ ( __lowercase , __lowercase ):
# Load checkpoint
UpperCAmelCase_ : Tuple = torch.load(__lowercase , map_location='''cpu''' )
UpperCAmelCase_ : Optional[int] = chkpt['''model''']
# We have the base model one level deeper than the original XLM repository
UpperCAmelCase_ : str = {}
for k, v in state_dict.items():
if "pred_layer" in k:
UpperCAmelCase_ : Tuple = v
else:
UpperCAmelCase_ : Union[str, Any] = v
UpperCAmelCase_ : int = chkpt['''params''']
UpperCAmelCase_ : Union[str, Any] = {n: v for n, v in config.items() if not isinstance(__lowercase , (torch.FloatTensor, numpy.ndarray) )}
UpperCAmelCase_ : int = chkpt['''dico_word2id''']
UpperCAmelCase_ : List[Any] = {s + '''</w>''' if s.find('''@@''' ) == -1 and i > 1_3 else s.replace('''@@''' , '''''' ): i for s, i in vocab.items()}
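    # Worked example of the mapping above (illustrative): BPE continuation pieces such as
    # "hel@@" have the "@@" marker stripped ("hel"), pieces without "@@" at index > 13
    # gain the end-of-word suffix ("lo" -> "lo</w>"), and the first 14 entries (special
    # tokens) pass through unchanged.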
# Save pytorch-model
UpperCAmelCase_ : Tuple = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
UpperCAmelCase_ : Tuple = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
UpperCAmelCase_ : Dict = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''vocab_file''']
print(F'''Save PyTorch model to {pytorch_weights_dump_path}''' )
torch.save(__lowercase , __lowercase )
print(F'''Save configuration file to {pytorch_config_dump_path}''' )
with open(__lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(__lowercase , indent=2 ) + '''\n''' )
print(F'''Save vocab file to {pytorch_vocab_dump_path}''' )
with open(__lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(__lowercase , indent=2 ) + '''\n''' )
if __name__ == "__main__":
__UpperCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xlm_checkpoint_path', default=None, type=str, required=True, help='Path to the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__UpperCamelCase : Dict = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
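# Example invocation (illustrative; the script filename and checkpoint name are
# assumptions):
#   python convert_xlm_original_pytorch_checkpoint_to_pytorch.py \
#       --xlm_checkpoint_path ./mlm_en_2048.pth \
#       --pytorch_dump_folder_path ./xlm-pytorch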
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__UpperCamelCase : Dict = R'\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `" / "`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `" // "`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `"wiki_dpr"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `"train"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `"compressed"`)\n The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and\n `"compressed"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a "dummy" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n'
@add_start_docstrings(snake_case__ )
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Union[str, Any] = 'rag'
A_ : Tuple = True
def __init__( self : int , __snake_case : List[str]=None , __snake_case : List[Any]=True , __snake_case : Optional[int]=None , __snake_case : Union[str, Any]=None , __snake_case : int=None , __snake_case : Any=None , __snake_case : Optional[int]=None , __snake_case : Optional[int]=" / " , __snake_case : Any=" // " , __snake_case : Tuple=5 , __snake_case : Union[str, Any]=300 , __snake_case : Any=768 , __snake_case : Tuple=8 , __snake_case : int="wiki_dpr" , __snake_case : Optional[int]="train" , __snake_case : Tuple="compressed" , __snake_case : Optional[int]=None , __snake_case : List[Any]=None , __snake_case : Optional[int]=False , __snake_case : str=False , __snake_case : Dict=0.0 , __snake_case : Union[str, Any]=True , __snake_case : Union[str, Any]=False , __snake_case : str=False , __snake_case : str=False , __snake_case : Optional[Any]=True , __snake_case : int=None , **__snake_case : str , ):
'''simple docstring'''
super().__init__(
bos_token_id=__snake_case , pad_token_id=__snake_case , eos_token_id=__snake_case , decoder_start_token_id=__snake_case , forced_eos_token_id=__snake_case , is_encoder_decoder=__snake_case , prefix=__snake_case , vocab_size=__snake_case , **__snake_case , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
UpperCAmelCase_ : Dict = kwargs.pop('''question_encoder''' )
UpperCAmelCase_ : List[Any] = question_encoder_config.pop('''model_type''' )
UpperCAmelCase_ : Any = kwargs.pop('''generator''' )
UpperCAmelCase_ : Union[str, Any] = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
UpperCAmelCase_ : Any = AutoConfig.for_model(__snake_case , **__snake_case )
UpperCAmelCase_ : int = AutoConfig.for_model(__snake_case , **__snake_case )
UpperCAmelCase_ : Any = reduce_loss
UpperCAmelCase_ : List[Any] = label_smoothing
UpperCAmelCase_ : Tuple = exclude_bos_score
UpperCAmelCase_ : int = do_marginalize
UpperCAmelCase_ : Tuple = title_sep
UpperCAmelCase_ : Union[str, Any] = doc_sep
UpperCAmelCase_ : Any = n_docs
UpperCAmelCase_ : Optional[int] = max_combined_length
UpperCAmelCase_ : Any = dataset
UpperCAmelCase_ : List[Any] = dataset_split
UpperCAmelCase_ : Union[str, Any] = index_name
UpperCAmelCase_ : List[str] = retrieval_vector_size
UpperCAmelCase_ : Optional[Any] = retrieval_batch_size
UpperCAmelCase_ : Optional[int] = passages_path
UpperCAmelCase_ : Optional[Any] = index_path
UpperCAmelCase_ : List[Any] = use_dummy_dataset
UpperCAmelCase_ : int = output_retrieved
UpperCAmelCase_ : int = do_deduplication
UpperCAmelCase_ : Optional[int] = use_cache
if self.forced_eos_token_id is None:
UpperCAmelCase_ : int = getattr(self.generator , '''forced_eos_token_id''' , __snake_case )
@classmethod
def _lowerCamelCase ( cls : Optional[Any] , __snake_case : PretrainedConfig , __snake_case : PretrainedConfig , **__snake_case : str ):
'''simple docstring'''
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **__snake_case )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = copy.deepcopy(self.__dict__ )
UpperCAmelCase_ : Optional[int] = self.question_encoder.to_dict()
UpperCAmelCase_ : Dict = self.generator.to_dict()
UpperCAmelCase_ : Optional[Any] = self.__class__.model_type
return output
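# Illustrative composition sketch (an assumption based on the published RAG pairing of
# a DPR question encoder with a BART generator; the class above is `RagConfig`):
#   question_encoder = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#   generator = AutoConfig.from_pretrained("facebook/bart-large")
#   rag_config = RagConfig(question_encoder=question_encoder.to_dict(), generator=generator.to_dict())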
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
__UpperCamelCase : Optional[int] = None
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : List[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__UpperCamelCase : Optional[int] = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
__UpperCamelCase : Optional[Any] = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Optional[int] = VOCAB_FILES_NAMES
A_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
A_ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ : str = ['input_ids', 'attention_mask']
A_ : int = TaTokenizer
A_ : List[int] = []
def __init__( self : Union[str, Any] , __snake_case : Tuple=None , __snake_case : List[Any]=None , __snake_case : int="</s>" , __snake_case : List[Any]="<unk>" , __snake_case : Dict="<pad>" , __snake_case : Tuple=100 , __snake_case : int=None , **__snake_case : Any , ):
'''simple docstring'''
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
UpperCAmelCase_ : Optional[int] = [f'''<extra_id_{i}>''' for i in range(__snake_case )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
UpperCAmelCase_ : Any = len(set(filter(lambda __snake_case : bool('''extra_id_''' in str(__snake_case ) ) , __snake_case ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
super().__init__(
__snake_case , tokenizer_file=__snake_case , eos_token=__snake_case , unk_token=__snake_case , pad_token=__snake_case , extra_ids=__snake_case , additional_special_tokens=__snake_case , **__snake_case , )
UpperCAmelCase_ : str = vocab_file
UpperCAmelCase_ : List[str] = bool(self.vocab_file )
UpperCAmelCase_ : Union[str, Any] = extra_ids
@staticmethod
def _lowerCamelCase ( __snake_case : Dict , __snake_case : List[str] , __snake_case : Tuple ):
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
UpperCAmelCase_ : str = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f''' {pretrained_model_name_or_path} automatically truncating your input to'''
f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , __snake_case , )
return max_model_length
def _lowerCamelCase ( self : Dict , __snake_case : str , __snake_case : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(__snake_case ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase_ : str = os.path.join(
__snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__snake_case ):
copyfile(self.vocab_file , __snake_case )
logger.info(f'''Copy vocab file to {out_vocab_file}''' )
return (out_vocab_file,)
def _lowerCamelCase ( self : List[str] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
UpperCAmelCase_ : int = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def _lowerCamelCase ( self : Dict , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase_ : int = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return list(
set(filter(lambda __snake_case : bool(re.search(R'''<extra_id_\d+>''' , __snake_case ) ) is not None , self.additional_special_tokens ) ) )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return [self.convert_tokens_to_ids(__snake_case ) for token in self.get_sentinel_tokens()]
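# Behavior sketch for the helpers above (illustrative, using the upstream method
# names): building inputs for a single sequence A yields `A </s>`, and for a pair
# `A </s> B </s>`; the token-type-id helper returns all zeros since T5 does not use
# token type ids; the sentinel tokens are the `<extra_id_0>` ... `<extra_id_99>`
# entries added in `__init__` (extra_ids defaults to 100).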
__UpperCamelCase : Optional[Any] = {
'Pillow': 'Pillow',
'accelerate': 'accelerate>=0.11.0',
'compel': 'compel==0.1.8',
'black': 'black~=23.1',
'datasets': 'datasets',
'filelock': 'filelock',
'flax': 'flax>=0.4.1',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.13.2',
'requests-mock': 'requests-mock==1.10.0',
'importlib_metadata': 'importlib_metadata',
'invisible-watermark': 'invisible-watermark',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2',
'jaxlib': 'jaxlib>=0.1.65',
'Jinja2': 'Jinja2',
'k-diffusion': 'k-diffusion>=0.0.12',
'torchsde': 'torchsde',
'note_seq': 'note_seq',
'librosa': 'librosa',
'numpy': 'numpy',
'omegaconf': 'omegaconf',
'parameterized': 'parameterized',
'protobuf': 'protobuf>=3.20.3,<4',
'pytest': 'pytest',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'ruff': 'ruff>=0.0.241',
'safetensors': 'safetensors',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'scipy': 'scipy',
'onnx': 'onnx',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'tensorboard': 'tensorboard',
'torch': 'torch>=1.4',
'torchvision': 'torchvision',
'transformers': 'transformers>=4.25.1',
'urllib3': 'urllib3<=2.0.0',
}
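# Minimal sketch of how a pin table like this is typically consumed (an assumption
# mirroring the usual setup.py pattern; the helper name is hypothetical):
#   def deps_list(deps_table, *pkgs):
#       return [deps_table[pkg] for pkg in pkgs]
#   deps_list(table, "torch", "transformers")  # -> ["torch>=1.4", "transformers>=4.25.1"]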
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase : str = {
'configuration_x_clip': [
'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XCLIPConfig',
'XCLIPTextConfig',
'XCLIPVisionConfig',
],
'processing_x_clip': ['XCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Any = [
'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'XCLIPModel',
'XCLIPPreTrainedModel',
'XCLIPTextModel',
'XCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
__UpperCamelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
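# Note (illustrative): with the `_LazyModule` registration above, the submodules named
# in the import structure are loaded only on first attribute access, so importing the
# package stays cheap until e.g. `XCLIPModel` is actually looked up.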
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCamelCase : Union[str, Any] = get_tests_dir('fixtures/test_sentencepiece_with_bytefallback.model')
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__( snake_case__ , unittest.TestCase ):
'''simple docstring'''
A_ : Tuple = GPTSwaTokenizer
A_ : Dict = False
A_ : str = True
A_ : Optional[Any] = False
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase_ : int = GPTSwaTokenizer(__snake_case , eos_token='''<unk>''' , bos_token='''<unk>''' , pad_token='''<unk>''' )
tokenizer.save_pretrained(self.tmpdirname )
def _lowerCamelCase ( self : str , __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = '''This is a test'''
UpperCAmelCase_ : Optional[int] = '''This is a test'''
return input_text, output_text
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Dict = '''<s>'''
UpperCAmelCase_ : int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case ) , __snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case ) , __snake_case )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(__snake_case ) , 2_000 )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 2_000 )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = GPTSwaTokenizer(__snake_case )
UpperCAmelCase_ : Dict = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__snake_case , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , [465, 287, 265, 631, 842] )
UpperCAmelCase_ : Dict = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
# fmt: off
self.assertListEqual(
__snake_case , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''] , )
# fmt: on
UpperCAmelCase_ : List[str] = tokenizer.convert_tokens_to_ids(__snake_case )
self.assertListEqual(
__snake_case , [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )
UpperCAmelCase_ : Optional[Any] = tokenizer.convert_ids_to_tokens(__snake_case )
# fmt: off
self.assertListEqual(
__snake_case , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''] )
# fmt: on
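        # The `<0xC3>`, `<0xA9>` pieces above illustrate SentencePiece byte fallback: a
        # character missing from the vocabulary (here "é") is emitted as its raw UTF-8
        # bytes, so arbitrary input stays representable without an <unk> token.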
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = GPTSwaTokenizer(__snake_case )
UpperCAmelCase_ : str = ['''This is a test''', '''I was born in 92000, and this is falsé.''']
UpperCAmelCase_ : List[str] = [
[465, 287, 265, 631, 842],
[262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(__snake_case , __snake_case ):
self.assertListEqual(tokenizer.encode_fast(__snake_case ) , __snake_case )
# Test that decode_fast returns the input text
for text, token_ids in zip(__snake_case , __snake_case ):
self.assertEqual(tokenizer.decode_fast(__snake_case ) , __snake_case )
@slow
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : str = [
'''<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')''',
'''Hey there, how are you doing this fine day?''',
'''This is a text with a trailing spaces followed by a dot .''',
'''Häj sväjs lillebrör! =)''',
'''Det är inget fel på Mr. Cool''',
]
# fmt: off
UpperCAmelCase_ : Any = {'''input_ids''': [[63_423, 5, 6_811, 14_954, 282, 816, 3_821, 63_466, 63_425, 63_462, 18, 63_978, 678, 301, 1_320, 63_423, 63_455, 63_458, 18, 63_982, 4_246, 3_940, 1_901, 47_789, 5_547, 18_994], [19_630, 1_100, 63_446, 1_342, 633, 544, 4_488, 593, 5_102, 2_416, 63_495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_652, 428, 268, 1_936, 515, 268, 58_593, 22_413, 9_106, 546, 268, 33_213, 63_979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55_130, 63_450, 924, 63_449, 2_249, 4_062, 1_558, 318, 63_504, 21_498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2_827, 2_559, 332, 6_575, 63_443, 26_801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__snake_case , model_name='''AI-Sweden/gpt-sw3-126m''' , sequences=__snake_case , )
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : str = ['image_processor', 'tokenizer']
A_ : int = 'LayoutLMv2ImageProcessor'
A_ : str = ('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast')
def __init__( self : Union[str, Any] , __snake_case : List[Any]=None , __snake_case : List[str]=None , **__snake_case : Optional[int] ):
'''simple docstring'''
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __snake_case , )
UpperCAmelCase_ : List[Any] = kwargs.pop('''feature_extractor''' )
UpperCAmelCase_ : Optional[int] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(__snake_case , __snake_case )
def __call__( self : List[str] , __snake_case : Dict , __snake_case : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __snake_case : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , __snake_case : Union[List[List[int]], List[List[List[int]]]] = None , __snake_case : Optional[Union[List[int], List[List[int]]]] = None , __snake_case : bool = True , __snake_case : Union[bool, str, PaddingStrategy] = False , __snake_case : Union[bool, str, TruncationStrategy] = None , __snake_case : Optional[int] = None , __snake_case : int = 0 , __snake_case : Optional[int] = None , __snake_case : Optional[bool] = None , __snake_case : Optional[bool] = None , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = True , __snake_case : Optional[Union[str, TensorType]] = None , **__snake_case : Optional[int] , ):
'''simple docstring'''
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes '''
'''if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
# first, apply the image processor
UpperCAmelCase_ : Tuple = self.image_processor(images=__snake_case , return_tensors=__snake_case )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(__snake_case , __snake_case ):
UpperCAmelCase_ : Optional[Any] = [text] # add batch dimension (as the image processor always adds a batch dimension)
UpperCAmelCase_ : Any = features['''words''']
UpperCAmelCase_ : str = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=__snake_case , add_special_tokens=__snake_case , padding=__snake_case , truncation=__snake_case , max_length=__snake_case , stride=__snake_case , pad_to_multiple_of=__snake_case , return_token_type_ids=__snake_case , return_attention_mask=__snake_case , return_overflowing_tokens=__snake_case , return_special_tokens_mask=__snake_case , return_offsets_mapping=__snake_case , return_length=__snake_case , verbose=__snake_case , return_tensors=__snake_case , **__snake_case , )
# add pixel values
UpperCAmelCase_ : List[str] = features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
UpperCAmelCase_ : Optional[int] = self.get_overflowing_images(__snake_case , encoded_inputs['''overflow_to_sample_mapping'''] )
UpperCAmelCase_ : List[Any] = images
return encoded_inputs
def _lowerCamelCase ( self : Union[str, Any] , __snake_case : str , __snake_case : List[Any] ):
'''simple docstring'''
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
UpperCAmelCase_ : List[str] = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(__snake_case ) != len(__snake_case ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
f''' {len(__snake_case )} and {len(__snake_case )}''' )
return images_with_overflow
def _lowerCamelCase ( self : List[Any] , *__snake_case : Optional[int] , **__snake_case : List[Any] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*__snake_case , **__snake_case )
def _lowerCamelCase ( self : str , *__snake_case : Optional[Any] , **__snake_case : Union[str, Any] ):
'''simple docstring'''
return self.tokenizer.decode(*__snake_case , **__snake_case )
@property
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __snake_case , )
return self.image_processor_class
@property
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __snake_case , )
return self.image_processor
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
__UpperCamelCase : Tuple = numpy.array([0, 0])
__UpperCamelCase : Dict = numpy.array([0.5, 0.8_660_254])
__UpperCamelCase : Optional[Any] = numpy.array([1, 0])
__UpperCamelCase : List[Any] = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def snake_case_ ( __lowercase , __lowercase ):
UpperCAmelCase_ : Tuple = initial_vectors
for _ in range(__lowercase ):
UpperCAmelCase_ : Optional[int] = iteration_step(__lowercase )
return vectors
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : Any = []
for i, start_vector in enumerate(vectors[:-1] ):
UpperCAmelCase_ : List[str] = vectors[i + 1]
new_vectors.append(__lowercase )
UpperCAmelCase_ : Union[str, Any] = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 6_0 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def snake_case_ ( __lowercase , __lowercase ):
UpperCAmelCase_ : Optional[Any] = numpy.radians(__lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = numpy.cos(__lowercase ), numpy.sin(__lowercase )
UpperCAmelCase_ : List[str] = numpy.array(((c, -s), (s, c)) )
return numpy.dot(__lowercase , __lowercase )
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : Optional[int] = plt.gca()
axes.set_aspect('''equal''' )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
UpperCAmelCase_ , UpperCAmelCase_ : str = zip(*__lowercase )
plt.plot(__lowercase , __lowercase )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCamelCase : Dict = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
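# Sanity-check sketch (illustrative): each iteration replaces every edge with 4 edges,
# so after n steps the open point list holds 3 * 4**n + 1 vectors:
#   assert len(iterate(INITIAL_VECTORS, 5)) == 3 * 4**5 + 1  # 3073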
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class lowerCAmelCase__:
'''simple docstring'''
A_ : torch.Tensor # [batch_size x 3]
A_ : torch.Tensor # [batch_size x 3]
A_ : torch.Tensor # [batch_size x 3]
A_ : torch.Tensor # [batch_size x 3]
A_ : int
A_ : int
A_ : float
A_ : float
A_ : Tuple[int]
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = torch.arange(self.height * self.width )
UpperCAmelCase_ : Any = torch.stack(
[
pixel_indices % self.width,
torch.div(__snake_case , self.width , rounding_mode='''trunc''' ),
] , axis=1 , )
return coords
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ , *UpperCAmelCase_ : List[Any] = self.shape
UpperCAmelCase_ : Optional[Any] = int(np.prod(__snake_case ) )
UpperCAmelCase_ : str = self.get_image_coords()
UpperCAmelCase_ : List[str] = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
UpperCAmelCase_ : Tuple = self.get_camera_rays(__snake_case )
UpperCAmelCase_ : Union[str, Any] = rays.view(__snake_case , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def _lowerCamelCase ( self : Dict , __snake_case : torch.Tensor ):
'''simple docstring'''
UpperCAmelCase_ , *UpperCAmelCase_ , UpperCAmelCase_ : int = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
UpperCAmelCase_ : str = coords.view(__snake_case , -1 , 2 )
UpperCAmelCase_ : List[Any] = self.resolution()
UpperCAmelCase_ : Optional[Any] = self.fov()
UpperCAmelCase_ : int = (flat.float() / (res - 1)) * 2 - 1
UpperCAmelCase_ : Optional[Any] = fracs * torch.tan(fov / 2 )
UpperCAmelCase_ : Optional[Any] = fracs.view(__snake_case , -1 , 2 )
UpperCAmelCase_ : List[Any] = (
self.z.view(__snake_case , 1 , 3 )
+ self.x.view(__snake_case , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(__snake_case , 1 , 3 ) * fracs[:, :, 1:]
)
UpperCAmelCase_ : Union[str, Any] = directions / directions.norm(dim=-1 , keepdim=__snake_case )
UpperCAmelCase_ : Optional[int] = torch.stack(
[
torch.broadcast_to(self.origin.view(__snake_case , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(__snake_case , *__snake_case , 2 , 3 )
def _lowerCamelCase ( self : Any , __snake_case : int , __snake_case : int ):
'''simple docstring'''
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=__snake_case , height=__snake_case , x_fov=self.x_fov , y_fov=self.y_fov , )
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : List[Any] = []
UpperCAmelCase_ : int = []
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : Tuple = []
for theta in np.linspace(0 , 2 * np.pi , num=2_0 ):
UpperCAmelCase_ : Tuple = np.array([np.sin(__lowercase ), np.cos(__lowercase ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
UpperCAmelCase_ : str = -z * 4
UpperCAmelCase_ : List[Any] = np.array([np.cos(__lowercase ), -np.sin(__lowercase ), 0.0] )
UpperCAmelCase_ : Tuple = np.cross(__lowercase , __lowercase )
origins.append(__lowercase )
xs.append(__lowercase )
ys.append(__lowercase )
zs.append(__lowercase )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(__lowercase , axis=0 ) ).float() , x=torch.from_numpy(np.stack(__lowercase , axis=0 ) ).float() , y=torch.from_numpy(np.stack(__lowercase , axis=0 ) ).float() , z=torch.from_numpy(np.stack(__lowercase , axis=0 ) ).float() , width=__lowercase , height=__lowercase , x_fov=0.7 , y_fov=0.7 , shape=(1, len(__lowercase )) , )
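# Illustrative note (upstream this factory is `create_pan_cameras(size)`): it builds 20
# camera poses orbiting the origin at radius 4 (origin = -4 * z), each with a 0.7 rad
# field of view over a square image plane.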
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
__UpperCamelCase : Optional[int] = logging.get_logger(__name__)
def snake_case_ ( __lowercase , __lowercase ):
def run_func(__lowercase ):
@wraps(__lowercase )
def run_in_eager_mode(*__lowercase , **__lowercase ):
return func(*__lowercase , **__lowercase )
@wraps(__lowercase )
@tf.function(experimental_compile=__lowercase )
def run_in_graph_mode(*__lowercase , **__lowercase ):
return func(*__lowercase , **__lowercase )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
'''Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.''' )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : int = random.Random()
UpperCAmelCase_ : str = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(__lowercase , shape=(batch_size, sequence_length) , dtype=tf.intaa )
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : TensorFlowBenchmarkArguments
A_ : PretrainedConfig
A_ : str = "TensorFlow"
@property
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
return tf.__version__
def _lowerCamelCase ( self : Dict , __snake_case : str , __snake_case : int , __snake_case : int ):
'''simple docstring'''
# initialize GPU on separate process
UpperCAmelCase_ : Optional[Any] = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
UpperCAmelCase_ : int = self._prepare_inference_func(__snake_case , __snake_case , __snake_case )
return self._measure_speed(_inference )
def _lowerCamelCase ( self : str , __snake_case : str , __snake_case : int , __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : Dict = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
UpperCAmelCase_ : int = self._prepare_train_func(__snake_case , __snake_case , __snake_case )
return self._measure_speed(_train )
def _lowerCamelCase ( self : Any , __snake_case : str , __snake_case : int , __snake_case : int ):
'''simple docstring'''
# initialize GPU on separate process
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __snake_case )
UpperCAmelCase_ : str = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
UpperCAmelCase_ : Tuple = self._prepare_inference_func(__snake_case , __snake_case , __snake_case )
return self._measure_memory(_inference )
def _lowerCamelCase ( self : List[Any] , __snake_case : str , __snake_case : int , __snake_case : int ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __snake_case )
UpperCAmelCase_ : Union[str, Any] = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
UpperCAmelCase_ : Union[str, Any] = self._prepare_train_func(__snake_case , __snake_case , __snake_case )
return self._measure_memory(_train )
def _lowerCamelCase ( self : Optional[Any] , __snake_case : str , __snake_case : int , __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError('''Mixed precision is currently not supported.''' )
UpperCAmelCase_ : Optional[int] = (
hasattr(__snake_case , '''architectures''' )
and isinstance(config.architectures , __snake_case )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCAmelCase_ : Optional[Any] = '''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCAmelCase_ : Optional[Any] = __import__('''transformers''' , fromlist=[model_class] )
UpperCAmelCase_ : Optional[Any] = getattr(__snake_case , __snake_case )
UpperCAmelCase_ : Tuple = model_cls(__snake_case )
except ImportError:
raise ImportError(
f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''' )
else:
UpperCAmelCase_ : List[Any] = TF_MODEL_MAPPING[config.__class__](__snake_case )
# encoder-decoder has vocab size saved differently
UpperCAmelCase_ : Union[str, Any] = config.vocab_size if hasattr(__snake_case , '''vocab_size''' ) else config.encoder.vocab_size
UpperCAmelCase_ : List[str] = random_input_ids(__snake_case , __snake_case , __snake_case )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(__snake_case , decoder_input_ids=__snake_case , training=__snake_case )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(__snake_case , training=__snake_case )
UpperCAmelCase_ : Union[str, Any] = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def _lowerCamelCase ( self : List[Any] , __snake_case : str , __snake_case : int , __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : str = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError('''Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.''' )
if self.args.fpaa:
raise NotImplementedError('''Mixed precision is currently not supported.''' )
UpperCAmelCase_ : Any = (
hasattr(__snake_case , '''architectures''' )
and isinstance(config.architectures , __snake_case )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCAmelCase_ : Optional[Any] = '''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCAmelCase_ : int = __import__('''transformers''' , fromlist=[model_class] )
UpperCAmelCase_ : List[Any] = getattr(__snake_case , __snake_case )
UpperCAmelCase_ : Optional[int] = model_cls(__snake_case )
except ImportError:
raise ImportError(
f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''' )
else:
UpperCAmelCase_ : Optional[Any] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](__snake_case )
# encoder-decoder has vocab size saved differently
UpperCAmelCase_ : Union[str, Any] = config.vocab_size if hasattr(__snake_case , '''vocab_size''' ) else config.encoder.vocab_size
UpperCAmelCase_ : Union[str, Any] = random_input_ids(__snake_case , __snake_case , __snake_case )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
UpperCAmelCase_ : Optional[int] = model(__snake_case , decoder_input_ids=__snake_case , labels=__snake_case , training=__snake_case )[0]
UpperCAmelCase_ : List[Any] = tf.gradients(__snake_case , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
UpperCAmelCase_ : Optional[Any] = model(__snake_case , labels=__snake_case , training=__snake_case )[0]
UpperCAmelCase_ : Any = tf.gradients(__snake_case , model.trainable_variables )
return gradients
UpperCAmelCase_ : Dict = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def _lowerCamelCase ( self : Dict , __snake_case : Union[str, Any] ):
'''simple docstring'''
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run the model 5 extra times to stabilize compilation for tpu
logger.info('''Do inference on TPU. Running model 5 times to stabilize compilation''' )
timeit.repeat(__snake_case , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
UpperCAmelCase_ : Tuple = timeit.repeat(
__snake_case , repeat=self.args.repeat , number=10 , )
return min(__snake_case ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(f'''Doesn\'t fit on GPU. {e}''' )
def _lowerCamelCase ( self : Dict , __snake_case : Callable[[], None] ):
'''simple docstring'''
logger.info(
'''Note that TensorFlow allocates more memory than '''
'''it might need to speed up computation. '''
'''The memory reported here corresponds to the memory '''
'''reported by `nvidia-smi`, which can vary depending '''
'''on total available memory on the GPU that is used.''' )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
'''`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory'''
''' consumption line by line.''' )
UpperCAmelCase_ : Optional[int] = start_memory_tracing('''transformers''' )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
'''Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking'''
''' with `args.memory=False`''' )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
'''py3nvml not installed, we won\'t log GPU memory usage. '''
'''Install py3nvml (pip install py3nvml) to log information about GPU.''' )
UpperCAmelCase_ : Any = '''N/A'''
else:
logger.info(
'''Measuring total GPU usage on GPU device. Make sure to not have additional processes'''
''' running on the same GPU.''' )
# init nvml
nvml.nvmlInit()
func()
UpperCAmelCase_ : Optional[int] = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
UpperCAmelCase_ : Union[str, Any] = nvml.nvmlDeviceGetMemoryInfo(__snake_case )
UpperCAmelCase_ : Optional[int] = meminfo.used
UpperCAmelCase_ : Union[str, Any] = Memory(__snake_case )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
'''When enabling line by line tracing, the max peak memory for CPU is inaccurate in'''
''' TensorFlow.''' )
UpperCAmelCase_ : str = None
else:
UpperCAmelCase_ : Optional[Any] = measure_peak_memory_cpu(__snake_case )
UpperCAmelCase_ : Any = Memory(__snake_case ) if isinstance(__snake_case , __snake_case ) else memory_bytes
if self.args.trace_memory_line_by_line:
UpperCAmelCase_ : Optional[int] = stop_memory_tracing(__snake_case )
if memory is None:
UpperCAmelCase_ : Tuple = summary.total
else:
UpperCAmelCase_ : Dict = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(f'''Doesn\'t fit on GPU. {e}''' )
return "N/A", None
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
__UpperCamelCase : Dict = logging.getLogger(__name__)
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Optional[int] = 'token-classification'
def __init__( self : Any , __snake_case : Optional[Any] ):
'''simple docstring'''
if type(__snake_case ) == dict:
UpperCAmelCase_ : Tuple = Namespace(**__snake_case )
UpperCAmelCase_ : Dict = import_module('''tasks''' )
try:
UpperCAmelCase_ : int = getattr(__snake_case , hparams.task_type )
UpperCAmelCase_ : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f'''Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
UpperCAmelCase_ : int = self.token_classification_task.get_labels(hparams.labels )
UpperCAmelCase_ : Dict = CrossEntropyLoss().ignore_index
super().__init__(__snake_case , len(self.labels ) , self.mode )
def _lowerCamelCase ( self : Optional[int] , **__snake_case : Optional[Any] ):
'''simple docstring'''
return self.model(**__snake_case )
def _lowerCamelCase ( self : str , __snake_case : Union[str, Any] , __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : int = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
UpperCAmelCase_ : Union[str, Any] = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
) # XLM and RoBERTa don"t use token_type_ids
UpperCAmelCase_ : str = self(**__snake_case )
UpperCAmelCase_ : Any = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = self.hparams
for mode in ["train", "dev", "test"]:
UpperCAmelCase_ : Optional[Any] = self._feature_file(__snake_case )
if os.path.exists(__snake_case ) and not args.overwrite_cache:
logger.info('''Loading features from cached file %s''' , __snake_case )
UpperCAmelCase_ : Any = torch.load(__snake_case )
else:
logger.info('''Creating features from dataset file at %s''' , args.data_dir )
UpperCAmelCase_ : Optional[Any] = self.token_classification_task.read_examples_from_file(args.data_dir , __snake_case )
UpperCAmelCase_ : List[str] = self.token_classification_task.convert_examples_to_features(
__snake_case , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ['''xlnet'''] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ['''xlnet'''] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=__snake_case , pad_on_left=bool(self.config.model_type in ['''xlnet'''] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info('''Saving features into cached file %s''' , __snake_case )
torch.save(__snake_case , __snake_case )
def _lowerCamelCase ( self : Any , __snake_case : int , __snake_case : int , __snake_case : bool = False ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = self._feature_file(__snake_case )
logger.info('''Loading features from cached file %s''' , __snake_case )
UpperCAmelCase_ : Optional[int] = torch.load(__snake_case )
UpperCAmelCase_ : Any = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
UpperCAmelCase_ : int = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
UpperCAmelCase_ : Any = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
UpperCAmelCase_ : Optional[int] = torch.tensor([0 for f in features] , dtype=torch.long )
# HACK(we will not use this anymore soon)
UpperCAmelCase_ : int = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(__snake_case , __snake_case , __snake_case , __snake_case ) , batch_size=__snake_case )
def _lowerCamelCase ( self : List[Any] , __snake_case : Dict , __snake_case : Union[str, Any] ):
'''simple docstring'''
"""Compute validation""" ""
UpperCAmelCase_ : str = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
UpperCAmelCase_ : Any = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
) # XLM and RoBERTa don"t use token_type_ids
UpperCAmelCase_ : int = self(**__snake_case )
UpperCAmelCase_ , UpperCAmelCase_ : int = outputs[:2]
UpperCAmelCase_ : Optional[int] = logits.detach().cpu().numpy()
UpperCAmelCase_ : List[Any] = inputs['''labels'''].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _lowerCamelCase ( self : List[str] , __snake_case : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = torch.stack([x['''val_loss'''] for x in outputs] ).mean()
UpperCAmelCase_ : Dict = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )
UpperCAmelCase_ : Any = np.argmax(__snake_case , axis=2 )
UpperCAmelCase_ : int = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
UpperCAmelCase_ : Tuple = dict(enumerate(self.labels ) )
UpperCAmelCase_ : Union[str, Any] = [[] for _ in range(out_label_ids.shape[0] )]
UpperCAmelCase_ : Optional[int] = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
UpperCAmelCase_ : Union[str, Any] = {
'''val_loss''': val_loss_mean,
'''accuracy_score''': accuracy_score(__snake_case , __snake_case ),
'''precision''': precision_score(__snake_case , __snake_case ),
'''recall''': recall_score(__snake_case , __snake_case ),
'''f1''': fa_score(__snake_case , __snake_case ),
}
UpperCAmelCase_ : str = dict(results.items() )
UpperCAmelCase_ : List[Any] = results
return ret, preds_list, out_label_list
def _lowerCamelCase ( self : List[str] , __snake_case : int ):
'''simple docstring'''
        # aggregate the per-batch validation outputs collected above
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = self._eval_end(__snake_case )
UpperCAmelCase_ : int = ret['''log''']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _lowerCamelCase ( self : List[Any] , __snake_case : Tuple ):
'''simple docstring'''
# updating to test_epoch_end instead of deprecated test_end
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self._eval_end(__snake_case )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
UpperCAmelCase_ : Optional[Any] = ret['''log''']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _lowerCamelCase ( __snake_case : str , __snake_case : Optional[int] ):
'''simple docstring'''
# Add NER specific options
BaseTransformer.add_model_specific_args(__snake_case , __snake_case )
parser.add_argument(
'''--task_type''' , default='''NER''' , type=__snake_case , help='''Task type to fine tune in training (e.g. NER, POS, etc)''' )
parser.add_argument(
'''--max_seq_length''' , default=128 , type=__snake_case , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--labels''' , default='''''' , type=__snake_case , help='''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.''' , )
parser.add_argument(
'''--gpus''' , default=0 , type=__snake_case , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
return parser
if __name__ == "__main__":
__UpperCamelCase : str = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
__UpperCamelCase : Dict = NERTransformer.add_model_specific_args(parser, os.getcwd())
__UpperCamelCase : Tuple = parser.parse_args()
__UpperCamelCase : Optional[Any] = NERTransformer(args)
__UpperCamelCase : int = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
        # pl uses this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
__UpperCamelCase : Optional[int] = sorted(glob.glob(os.path.join(args.output_dir, 'checkpoint-epoch=*.ckpt'), recursive=True))
__UpperCamelCase : List[Any] = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
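# The `_eval_end` hook above rebuilds per-sentence label sequences by skipping
# positions whose gold id equals `pad_token_label_id`. A minimal, hedged sketch
# of that alignment step in isolation (toy helper, not part of the script):
import numpy as np

def align_predictions(preds, out_label_ids, label_map, pad_token_label_id=-100):
    """Turn (batch, seq_len, num_labels) logits and gold ids into label strings."""
    pred_ids = np.argmax(preds, axis=2)
    preds_list = [[] for _ in range(out_label_ids.shape[0])]
    labels_list = [[] for _ in range(out_label_ids.shape[0])]
    for i in range(out_label_ids.shape[0]):
        for j in range(out_label_ids.shape[1]):
            if out_label_ids[i, j] != pad_token_label_id:  # skip padded positions
                labels_list[i].append(label_map[out_label_ids[i, j]])
                preds_list[i].append(label_map[pred_ids[i, j]])
    return preds_list, labels_list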
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Any = ['pixel_values']
def __init__( self : Optional[int] , __snake_case : bool = True , __snake_case : Dict[str, int] = None , __snake_case : PILImageResampling = PILImageResampling.BICUBIC , __snake_case : bool = True , __snake_case : Dict[str, int] = None , __snake_case : bool = True , __snake_case : Union[int, float] = 1 / 255 , __snake_case : bool = True , __snake_case : Optional[Union[float, List[float]]] = None , __snake_case : Optional[Union[float, List[float]]] = None , __snake_case : bool = True , **__snake_case : Union[str, Any] , ):
'''simple docstring'''
super().__init__(**__snake_case )
UpperCAmelCase_ : Union[str, Any] = size if size is not None else {'''shortest_edge''': 224}
UpperCAmelCase_ : Any = get_size_dict(__snake_case , default_to_square=__snake_case )
UpperCAmelCase_ : Optional[Any] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
UpperCAmelCase_ : int = get_size_dict(__snake_case , default_to_square=__snake_case , param_name='''crop_size''' )
UpperCAmelCase_ : Dict = do_resize
UpperCAmelCase_ : Union[str, Any] = size
UpperCAmelCase_ : List[str] = resample
UpperCAmelCase_ : List[Any] = do_center_crop
UpperCAmelCase_ : Dict = crop_size
UpperCAmelCase_ : int = do_rescale
UpperCAmelCase_ : List[str] = rescale_factor
UpperCAmelCase_ : int = do_normalize
UpperCAmelCase_ : Union[str, Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
UpperCAmelCase_ : List[Any] = image_std if image_std is not None else OPENAI_CLIP_STD
UpperCAmelCase_ : Optional[int] = do_convert_rgb
def _lowerCamelCase ( self : str , __snake_case : np.ndarray , __snake_case : Dict[str, int] , __snake_case : PILImageResampling = PILImageResampling.BICUBIC , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : Optional[int] , ):
'''simple docstring'''
UpperCAmelCase_ : str = get_size_dict(__snake_case , default_to_square=__snake_case )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
UpperCAmelCase_ : Union[str, Any] = get_resize_output_image_size(__snake_case , size=size['''shortest_edge'''] , default_to_square=__snake_case )
return resize(__snake_case , size=__snake_case , resample=__snake_case , data_format=__snake_case , **__snake_case )
def _lowerCamelCase ( self : List[str] , __snake_case : np.ndarray , __snake_case : Dict[str, int] , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : Tuple , ):
'''simple docstring'''
UpperCAmelCase_ : Any = get_size_dict(__snake_case )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(__snake_case , size=(size['''height'''], size['''width''']) , data_format=__snake_case , **__snake_case )
def _lowerCamelCase ( self : Tuple , __snake_case : np.ndarray , __snake_case : Union[int, float] , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : Optional[Any] , ):
'''simple docstring'''
return rescale(__snake_case , scale=__snake_case , data_format=__snake_case , **__snake_case )
def _lowerCamelCase ( self : Optional[int] , __snake_case : np.ndarray , __snake_case : Union[float, List[float]] , __snake_case : Union[float, List[float]] , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : Optional[Any] , ):
'''simple docstring'''
return normalize(__snake_case , mean=__snake_case , std=__snake_case , data_format=__snake_case , **__snake_case )
def _lowerCamelCase ( self : Optional[Any] , __snake_case : ImageInput , __snake_case : bool = None , __snake_case : Dict[str, int] = None , __snake_case : PILImageResampling = None , __snake_case : bool = None , __snake_case : int = None , __snake_case : bool = None , __snake_case : float = None , __snake_case : bool = None , __snake_case : Optional[Union[float, List[float]]] = None , __snake_case : Optional[Union[float, List[float]]] = None , __snake_case : bool = None , __snake_case : Optional[Union[str, TensorType]] = None , __snake_case : Optional[ChannelDimension] = ChannelDimension.FIRST , **__snake_case : Tuple , ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ : Tuple = size if size is not None else self.size
UpperCAmelCase_ : Any = get_size_dict(__snake_case , param_name='''size''' , default_to_square=__snake_case )
UpperCAmelCase_ : Optional[Any] = resample if resample is not None else self.resample
UpperCAmelCase_ : Any = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ : Any = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ : Dict = get_size_dict(__snake_case , param_name='''crop_size''' , default_to_square=__snake_case )
UpperCAmelCase_ : Dict = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ : str = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ : List[str] = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ : Tuple = image_std if image_std is not None else self.image_std
UpperCAmelCase_ : Optional[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
UpperCAmelCase_ : int = make_list_of_images(__snake_case )
if not valid_images(__snake_case ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
UpperCAmelCase_ : Optional[int] = [convert_to_rgb(__snake_case ) for image in images]
# All transformations expect numpy arrays.
UpperCAmelCase_ : Dict = [to_numpy_array(__snake_case ) for image in images]
if do_resize:
UpperCAmelCase_ : Optional[int] = [self.resize(image=__snake_case , size=__snake_case , resample=__snake_case ) for image in images]
if do_center_crop:
UpperCAmelCase_ : List[Any] = [self.center_crop(image=__snake_case , size=__snake_case ) for image in images]
if do_rescale:
UpperCAmelCase_ : Union[str, Any] = [self.rescale(image=__snake_case , scale=__snake_case ) for image in images]
if do_normalize:
UpperCAmelCase_ : Union[str, Any] = [self.normalize(image=__snake_case , mean=__snake_case , std=__snake_case ) for image in images]
UpperCAmelCase_ : str = [to_channel_dimension_format(__snake_case , __snake_case ) for image in images]
UpperCAmelCase_ : Union[str, Any] = {'''pixel_values''': images}
return BatchFeature(data=__snake_case , tensor_type=__snake_case )
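# Hedged illustration of the "shortest_edge" sizing rule used by the resize
# step above: scale so the shorter side hits the target while keeping the
# aspect ratio (standalone toy helper, approximating the library behavior).
def shortest_edge_size(height, width, shortest_edge=224):
    """Return (new_height, new_width) after shortest-edge resizing."""
    short, long = (height, width) if height <= width else (width, height)
    new_short, new_long = shortest_edge, int(shortest_edge * long / short)
    return (new_short, new_long) if height <= width else (new_long, new_short)

# shortest_edge_size(300, 400) == (224, 298)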
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Tuple = logging.get_logger(__name__)
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Union[str, Any] = 'encoder-decoder'
A_ : Optional[int] = True
def __init__( self : Dict , **__snake_case : Union[str, Any] ):
'''simple docstring'''
super().__init__(**__snake_case )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
UpperCAmelCase_ : int = kwargs.pop('''encoder''' )
UpperCAmelCase_ : List[Any] = encoder_config.pop('''model_type''' )
UpperCAmelCase_ : int = kwargs.pop('''decoder''' )
UpperCAmelCase_ : Union[str, Any] = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
UpperCAmelCase_ : Optional[int] = AutoConfig.for_model(__snake_case , **__snake_case )
UpperCAmelCase_ : Any = AutoConfig.for_model(__snake_case , **__snake_case )
UpperCAmelCase_ : List[Any] = True
@classmethod
def _lowerCamelCase ( cls : Optional[Any] , __snake_case : PretrainedConfig , __snake_case : PretrainedConfig , **__snake_case : Union[str, Any] ):
'''simple docstring'''
logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
UpperCAmelCase_ : Dict = True
UpperCAmelCase_ : str = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__snake_case )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : int = copy.deepcopy(self.__dict__ )
UpperCAmelCase_ : Tuple = self.encoder.to_dict()
UpperCAmelCase_ : Tuple = self.decoder.to_dict()
UpperCAmelCase_ : Tuple = self.__class__.model_type
return output
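# Hedged usage sketch: the constructor above requires `encoder` and `decoder`
# dict kwargs, each carrying a `model_type` key, and rebuilds both sub-configs
# through AutoConfig. Illustrative only (class name follows this file's
# obfuscated naming):
#   from transformers import BertConfig
#   cfg = lowerCAmelCase__(encoder=BertConfig().to_dict(), decoder=BertConfig().to_dict())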
import qiskit
def snake_case_ ( __lowercase = 2 ):
UpperCAmelCase_ : Union[str, Any] = qubits
# Using Aer's simulator
UpperCAmelCase_ : Tuple = qiskit.Aer.get_backend('''aer_simulator''' )
# Creating a Quantum Circuit acting on the q register
UpperCAmelCase_ : Any = qiskit.QuantumCircuit(__lowercase , __lowercase )
    # Adding an H gate on qubit 0 (now q0 is in superposition)
circuit.h(0 )
for i in range(1 , __lowercase ):
# Adding CX (CNOT) gate
circuit.cx(i - 1 , __lowercase )
# Mapping the quantum measurement to the classical bits
circuit.measure(list(range(__lowercase ) ) , list(range(__lowercase ) ) )
    # Measuring any one qubit now collapses the superposition of the other
    # qubits, leaving them in the same state as the measured one.
# Executing the circuit on the simulator
UpperCAmelCase_ : List[str] = qiskit.execute(__lowercase , __lowercase , shots=1_0_0_0 )
return job.result().get_counts(__lowercase )
if __name__ == "__main__":
print(F'Total count for various states are: {quantum_entanglement(3)}')
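# Expected behavior (hedged): this is a GHZ-style circuit, so for 3 qubits the
# 1_000 shots split roughly evenly between the all-zeros and all-ones states,
# e.g. {'000': ~500, '111': ~500}, with no other outcomes.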
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def snake_case_ ( ):
UpperCAmelCase_ : int = '''https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'''
UpperCAmelCase_ : Dict = Image.open(requests.get(__lowercase , stream=__lowercase ).raw ).convert('''RGB''' )
return image
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : List[str] = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : Any = dct.pop(__lowercase )
UpperCAmelCase_ : Optional[Any] = val
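# Hedged illustration of the helper above on a plain dict: popping the old key
# and re-inserting its value under the new one, e.g.
#   {"ln_vision.weight": w}  ->  {"vision_model.post_layernorm.weight": w}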
def snake_case_ ( __lowercase , __lowercase ):
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
UpperCAmelCase_ : Optional[int] = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' )
UpperCAmelCase_ : Any = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
UpperCAmelCase_ : int = torch.cat((q_bias, torch.zeros_like(__lowercase , requires_grad=__lowercase ), v_bias) )
UpperCAmelCase_ : List[str] = qkv_bias
def snake_case_ ( __lowercase , __lowercase ):
UpperCAmelCase_ : Optional[Any] = 3_6_4 if '''coco''' in model_name else 2_2_4
UpperCAmelCase_ : Any = BlipaVisionConfig(image_size=__lowercase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
UpperCAmelCase_ : Any = OPTConfig.from_pretrained('''facebook/opt-2.7b''' , eos_token_id=__lowercase ).to_dict()
elif "opt-6.7b" in model_name:
UpperCAmelCase_ : List[str] = OPTConfig.from_pretrained('''facebook/opt-6.7b''' , eos_token_id=__lowercase ).to_dict()
elif "t5-xl" in model_name:
UpperCAmelCase_ : List[str] = TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
UpperCAmelCase_ : Any = TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
UpperCAmelCase_ : List[Any] = BlipaConfig(vision_config=__lowercase , text_config=__lowercase )
return config, image_size
@torch.no_grad()
def snake_case_ ( __lowercase , __lowercase=None , __lowercase=False ):
UpperCAmelCase_ : List[Any] = (
AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' )
if '''opt''' in model_name
else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' )
)
UpperCAmelCase_ : List[str] = tokenizer('''\n''' , add_special_tokens=__lowercase ).input_ids[0]
UpperCAmelCase_ , UpperCAmelCase_ : str = get_blipa_config(__lowercase , eos_token_id=__lowercase )
UpperCAmelCase_ : List[Any] = BlipaForConditionalGeneration(__lowercase ).eval()
UpperCAmelCase_ : Tuple = {
'''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''),
'''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''),
'''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''),
'''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''),
'''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''),
'''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''),
'''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''),
}
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = model_name_to_original[model_name]
# load original model
print('''Loading original model...''' )
UpperCAmelCase_ : int = '''cuda''' if torch.cuda.is_available() else '''cpu'''
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = load_model_and_preprocess(
name=__lowercase , model_type=__lowercase , is_eval=__lowercase , device=__lowercase )
original_model.eval()
print('''Done!''' )
# update state dict keys
UpperCAmelCase_ : Optional[Any] = original_model.state_dict()
UpperCAmelCase_ : List[Any] = create_rename_keys(__lowercase )
for src, dest in rename_keys:
rename_key(__lowercase , __lowercase , __lowercase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
UpperCAmelCase_ : Union[str, Any] = state_dict.pop(__lowercase )
if key.startswith('''Qformer.bert''' ):
UpperCAmelCase_ : Tuple = key.replace('''Qformer.bert''' , '''qformer''' )
if "attention.self" in key:
UpperCAmelCase_ : Optional[Any] = key.replace('''self''' , '''attention''' )
if "opt_proj" in key:
UpperCAmelCase_ : Any = key.replace('''opt_proj''' , '''language_projection''' )
if "t5_proj" in key:
UpperCAmelCase_ : Tuple = key.replace('''t5_proj''' , '''language_projection''' )
if key.startswith('''opt''' ):
UpperCAmelCase_ : Any = key.replace('''opt''' , '''language''' )
if key.startswith('''t5''' ):
UpperCAmelCase_ : Optional[Any] = key.replace('''t5''' , '''language''' )
UpperCAmelCase_ : List[str] = val
# read in qv biases
read_in_q_v_bias(__lowercase , __lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = hf_model.load_state_dict(__lowercase , strict=__lowercase )
assert len(__lowercase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
UpperCAmelCase_ : str = load_demo_image()
UpperCAmelCase_ : Any = vis_processors['''eval'''](__lowercase ).unsqueeze(0 ).to(__lowercase )
UpperCAmelCase_ : Optional[Any] = tokenizer(['''\n'''] , return_tensors='''pt''' ).input_ids.to(__lowercase )
# create processor
UpperCAmelCase_ : Optional[int] = BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size} , image_mean=__lowercase , image_std=__lowercase )
UpperCAmelCase_ : Tuple = BlipaProcessor(image_processor=__lowercase , tokenizer=__lowercase )
UpperCAmelCase_ : str = processor(images=__lowercase , return_tensors='''pt''' ).pixel_values.to(__lowercase )
    # make sure the processor creates the exact same pixel values
assert torch.allclose(__lowercase , __lowercase )
original_model.to(__lowercase )
hf_model.to(__lowercase )
with torch.no_grad():
if "opt" in model_name:
UpperCAmelCase_ : Tuple = original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits
UpperCAmelCase_ : Optional[int] = hf_model(__lowercase , __lowercase ).logits
else:
UpperCAmelCase_ : int = original_model(
{'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits
UpperCAmelCase_ : Optional[int] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_0_0 )
UpperCAmelCase_ : int = hf_model(__lowercase , __lowercase , labels=__lowercase ).logits
assert original_logits.shape == logits.shape
print('''First values of original logits:''' , original_logits[0, :3, :3] )
print('''First values of HF logits:''' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
UpperCAmelCase_ : Tuple = torch.tensor(
[[-4_1.5_8_5_0, -4.4_4_4_0, -8.9_9_2_2], [-4_7.4_3_2_2, -5.9_1_4_3, -1.7_3_4_0]] , device=__lowercase )
assert torch.allclose(logits[0, :3, :3] , __lowercase , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
UpperCAmelCase_ : Tuple = torch.tensor(
[[-5_7.0_1_0_9, -9.8_9_6_7, -1_2.6_2_8_0], [-6_8.6_5_7_8, -1_2.7_1_9_1, -1_0.5_0_6_5]] , device=__lowercase )
else:
# cast to same type
UpperCAmelCase_ : Optional[int] = logits.dtype
assert torch.allclose(original_logits.to(__lowercase ) , __lowercase , atol=1e-2 )
print('''Looks ok!''' )
print('''Generating a caption...''' )
UpperCAmelCase_ : Union[str, Any] = ''''''
UpperCAmelCase_ : Optional[Any] = tokenizer(__lowercase , return_tensors='''pt''' ).input_ids.to(__lowercase )
UpperCAmelCase_ : int = original_model.generate({'''image''': original_pixel_values} )
UpperCAmelCase_ : Optional[int] = hf_model.generate(
__lowercase , __lowercase , do_sample=__lowercase , num_beams=5 , max_length=3_0 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('''Original generation:''' , __lowercase )
UpperCAmelCase_ : Tuple = input_ids.shape[1]
UpperCAmelCase_ : str = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=__lowercase )
UpperCAmelCase_ : Optional[int] = [text.strip() for text in output_text]
print('''HF generation:''' , __lowercase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(__lowercase )
hf_model.save_pretrained(__lowercase )
if push_to_hub:
processor.push_to_hub(F'''nielsr/{model_name}''' )
hf_model.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
__UpperCamelCase : List[Any] = argparse.ArgumentParser()
__UpperCamelCase : Optional[Any] = [
'blip2-opt-2.7b',
'blip2-opt-6.7b',
'blip2-opt-2.7b-coco',
'blip2-opt-6.7b-coco',
'blip2-flan-t5-xl',
'blip2-flan-t5-xl-coco',
'blip2-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='blip2-opt-2.7b',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
__UpperCamelCase : int = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
__UpperCamelCase : str = logging.get_logger('transformers.models.speecht5')
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
hf_model.apply_weight_norm()
UpperCAmelCase_ : str = checkpoint['''input_conv.weight_g''']
UpperCAmelCase_ : Tuple = checkpoint['''input_conv.weight_v''']
UpperCAmelCase_ : Union[str, Any] = checkpoint['''input_conv.bias''']
for i in range(len(config.upsample_rates ) ):
UpperCAmelCase_ : Union[str, Any] = checkpoint[F'''upsamples.{i}.1.weight_g''']
UpperCAmelCase_ : Any = checkpoint[F'''upsamples.{i}.1.weight_v''']
UpperCAmelCase_ : int = checkpoint[F'''upsamples.{i}.1.bias''']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
UpperCAmelCase_ : Optional[Any] = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_g''']
UpperCAmelCase_ : Any = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_v''']
UpperCAmelCase_ : List[Any] = checkpoint[F'''blocks.{i}.convs1.{j}.1.bias''']
UpperCAmelCase_ : str = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_g''']
UpperCAmelCase_ : Optional[Any] = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_v''']
UpperCAmelCase_ : Tuple = checkpoint[F'''blocks.{i}.convs2.{j}.1.bias''']
UpperCAmelCase_ : Optional[int] = checkpoint['''output_conv.1.weight_g''']
UpperCAmelCase_ : Dict = checkpoint['''output_conv.1.weight_v''']
UpperCAmelCase_ : Any = checkpoint['''output_conv.1.bias''']
hf_model.remove_weight_norm()
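# Hedged note on the apply/remove pattern above: `apply_weight_norm` re-creates
# the weight_g/weight_v parametrization so the checkpoint tensors can be copied
# in one-to-one, and `remove_weight_norm` then folds them back into a single
# plain weight tensor for inference.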
@torch.no_grad()
def snake_case_ ( __lowercase , __lowercase , __lowercase , __lowercase=None , __lowercase=None , ):
if config_path is not None:
UpperCAmelCase_ : Any = SpeechTaHifiGanConfig.from_pretrained(__lowercase )
else:
UpperCAmelCase_ : Optional[int] = SpeechTaHifiGanConfig()
UpperCAmelCase_ : str = SpeechTaHifiGan(__lowercase )
UpperCAmelCase_ : List[Any] = torch.load(__lowercase )
load_weights(orig_checkpoint['''model''']['''generator'''] , __lowercase , __lowercase )
UpperCAmelCase_ : Optional[Any] = np.load(__lowercase )
UpperCAmelCase_ : Any = stats[0].reshape(-1 )
UpperCAmelCase_ : Dict = stats[1].reshape(-1 )
UpperCAmelCase_ : Optional[Any] = torch.from_numpy(__lowercase ).float()
UpperCAmelCase_ : int = torch.from_numpy(__lowercase ).float()
model.save_pretrained(__lowercase )
if repo_id:
print('''Pushing to the hub...''' )
model.push_to_hub(__lowercase )
if __name__ == "__main__":
__UpperCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
__UpperCamelCase : Optional[Any] = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def snake_case_ ( __lowercase , __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : int = multiprocessing.Manager()
UpperCAmelCase_ : Union[str, Any] = manager.list()
UpperCAmelCase_ : int = multiprocessing.Process(target=__lowercase , args=(check_program, result, timeout) )
p.start()
p.join(timeout=timeout + 1 )
if p.is_alive():
p.kill()
if not result:
result.append('''timed out''' )
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
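# Hedged usage sketch (argument names are hypothetical; the obfuscated
# signature mirrors check_correctness from the original code-eval metric:
# program, timeout, task_id, completion_id):
#   record = snake_case_("assert 1 + 1 == 2", 3.0, "task_0", 0)
#   record["passed"]  # True if the program ran to completion within the timeout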
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
with create_tempdir():
# These system calls are needed when cleaning up tempdir.
import os
import shutil
UpperCAmelCase_ : str = shutil.rmtree
UpperCAmelCase_ : Tuple = os.rmdir
UpperCAmelCase_ : Dict = os.chdir
# Disable functionalities that can make destructive changes to the test.
reliability_guard()
# Run program.
try:
UpperCAmelCase_ : Optional[int] = {}
with swallow_io():
with time_limit(__lowercase ):
exec(__lowercase , __lowercase )
result.append('''passed''' )
except TimeoutException:
result.append('''timed out''' )
except BaseException as e:
result.append(F'''failed: {e}''' )
# Needed for cleaning up.
UpperCAmelCase_ : Optional[int] = rmtree
UpperCAmelCase_ : Optional[Any] = rmdir
UpperCAmelCase_ : Optional[Any] = chdir
@contextlib.contextmanager
def snake_case_ ( __lowercase ):
def signal_handler(__lowercase , __lowercase ):
raise TimeoutException('''Timed out!''' )
signal.setitimer(signal.ITIMER_REAL , __lowercase )
signal.signal(signal.SIGALRM , __lowercase )
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
@contextlib.contextmanager
def snake_case_ ( ):
UpperCAmelCase_ : Optional[Any] = WriteOnlyStringIO()
with contextlib.redirect_stdout(__lowercase ):
with contextlib.redirect_stderr(__lowercase ):
with redirect_stdin(__lowercase ):
yield
@contextlib.contextmanager
def snake_case_ ( ):
with tempfile.TemporaryDirectory() as dirname:
with chdir(__lowercase ):
yield dirname
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
pass
class lowerCAmelCase__( io.StringIO ):
'''simple docstring'''
def _lowerCamelCase ( self : Dict , *__snake_case : List[Any] , **__snake_case : int ):
'''simple docstring'''
raise OSError
def _lowerCamelCase ( self : Dict , *__snake_case : int , **__snake_case : Any ):
'''simple docstring'''
raise OSError
def _lowerCamelCase ( self : int , *__snake_case : List[str] , **__snake_case : Optional[Any] ):
'''simple docstring'''
raise OSError
def _lowerCamelCase ( self : Union[str, Any] , *__snake_case : Optional[Any] , **__snake_case : List[Any] ):
'''simple docstring'''
return False
class lowerCAmelCase__( contextlib._RedirectStream ): # type: ignore
'''simple docstring'''
A_ : Optional[Any] = 'stdin'
@contextlib.contextmanager
def snake_case_ ( __lowercase ):
if root == ".":
yield
return
UpperCAmelCase_ : Tuple = os.getcwd()
os.chdir(__lowercase )
try:
yield
except BaseException as exc:
raise exc
finally:
os.chdir(__lowercase )
def snake_case_ ( __lowercase=None ):
if maximum_memory_bytes is not None:
import resource
resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
faulthandler.disable()
import builtins
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Any = None
import os
UpperCAmelCase_ : Union[str, Any] = '''1'''
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : str = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : int = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : int = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Any = None
import shutil
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Tuple = None
import subprocess
UpperCAmelCase_ : Dict = None # type: ignore
UpperCAmelCase_ : Union[str, Any] = None
import sys
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : Dict = None
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
__UpperCamelCase : List[Any] = [
{'dataset': 'wikipedia', 'config_name': '20220301.de'},
{'dataset': 'wikipedia', 'config_name': '20220301.en'},
{'dataset': 'wikipedia', 'config_name': '20220301.fr'},
{'dataset': 'wikipedia', 'config_name': '20220301.frr'},
{'dataset': 'wikipedia', 'config_name': '20220301.it'},
{'dataset': 'wikipedia', 'config_name': '20220301.simple'},
{'dataset': 'snli', 'config_name': 'plain_text'},
{'dataset': 'eli5', 'config_name': 'LFQA_reddit'},
{'dataset': 'wiki40b', 'config_name': 'en'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.compressed'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.no_index'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.multiset.no_index'},
{'dataset': 'natural_questions', 'config_name': 'default'},
]
def snake_case_ ( __lowercase=True ):
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=snake_case__ ) )
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Union[str, Any] = None
A_ : Union[str, Any] = None
def _lowerCamelCase ( self : Tuple , __snake_case : List[str] , __snake_case : List[Any] ):
'''simple docstring'''
with TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : Optional[int] = dataset_module_factory(__snake_case , cache_dir=__snake_case )
UpperCAmelCase_ : List[Any] = import_main_class(dataset_module.module_path , dataset=__snake_case )
UpperCAmelCase_ : DatasetBuilder = builder_cls(
cache_dir=__snake_case , config_name=__snake_case , hash=dataset_module.hash , )
UpperCAmelCase_ : List[Any] = '''/'''.join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=__snake_case ).replace(os.sep , '''/''' ),
config.DATASET_INFO_FILENAME,
] )
UpperCAmelCase_ : List[str] = cached_path(__snake_case , cache_dir=__snake_case )
self.assertTrue(os.path.exists(__snake_case ) )
@pytest.mark.integration
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('''test_hf_gcp''' ) / '''test_wikipedia_simple'''
UpperCAmelCase_ : List[str] = dataset_module_factory('''wikipedia''' , cache_dir=__lowercase )
UpperCAmelCase_ : int = import_main_class(dataset_module.module_path )
UpperCAmelCase_ : DatasetBuilder = builder_cls(
cache_dir=__lowercase , config_name='''20220301.frr''' , hash=dataset_module.hash , )
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
UpperCAmelCase_ : List[Any] = None
builder_instance.download_and_prepare()
UpperCAmelCase_ : Optional[int] = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : List[Any] = dataset_module_factory('''wikipedia''' , cache_dir=__lowercase )
UpperCAmelCase_ : Dict = import_main_class(dataset_module.module_path , dataset=__lowercase )
UpperCAmelCase_ : DatasetBuilder = builder_cls(
cache_dir=__lowercase , config_name='''20220301.frr''' , hash=dataset_module.hash , )
UpperCAmelCase_ : List[Any] = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(__lowercase , __lowercase )
assert "train" in ds
assert isinstance(ds['''train'''] , __lowercase )
assert next(iter(ds['''train'''] ) )
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : str = logging.get_logger(__name__)
__UpperCamelCase : Any = {
'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : int = 'falcon'
A_ : int = ['past_key_values']
def __init__( self : Optional[Any] , __snake_case : Tuple=65_024 , __snake_case : List[str]=4_544 , __snake_case : Optional[Any]=32 , __snake_case : Any=71 , __snake_case : str=1E-5 , __snake_case : List[str]=0.02 , __snake_case : List[Any]=True , __snake_case : Dict=0.0 , __snake_case : Optional[Any]=0.0 , __snake_case : Any=None , __snake_case : List[Any]=False , __snake_case : Dict=False , __snake_case : Optional[int]=True , __snake_case : List[Any]=True , __snake_case : Optional[Any]=False , __snake_case : Dict=11 , __snake_case : List[str]=11 , **__snake_case : int , ):
'''simple docstring'''
UpperCAmelCase_ : int = vocab_size
# Backward compatibility with n_embed kwarg
UpperCAmelCase_ : Union[str, Any] = kwargs.pop('''n_embed''' , __snake_case )
UpperCAmelCase_ : str = hidden_size if n_embed is None else n_embed
UpperCAmelCase_ : Tuple = num_hidden_layers
UpperCAmelCase_ : Tuple = num_attention_heads
UpperCAmelCase_ : Optional[int] = layer_norm_epsilon
UpperCAmelCase_ : int = initializer_range
UpperCAmelCase_ : Optional[int] = use_cache
UpperCAmelCase_ : List[Any] = hidden_dropout
UpperCAmelCase_ : Any = attention_dropout
UpperCAmelCase_ : Tuple = bos_token_id
UpperCAmelCase_ : List[Any] = eos_token_id
UpperCAmelCase_ : Any = num_attention_heads if num_kv_heads is None else num_kv_heads
UpperCAmelCase_ : Optional[int] = alibi
UpperCAmelCase_ : Dict = new_decoder_architecture
UpperCAmelCase_ : List[Any] = multi_query # Ignored when new_decoder_architecture is True
UpperCAmelCase_ : Tuple = parallel_attn
UpperCAmelCase_ : List[Any] = bias
super().__init__(bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return self.hidden_size // self.num_attention_heads
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return not self.alibi
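# Hedged note: the two properties above derive the per-head dimension
# (hidden_size // num_attention_heads, e.g. 4_544 // 71 == 64 for Falcon-7B)
# and report rotary position embeddings as simply `not alibi`.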
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : List[str] = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
UpperCAmelCase_ : Tuple = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
UpperCAmelCase_ : List[str] = 4
UpperCAmelCase_ : Union[str, Any] = 4_8
UpperCAmelCase_ : Any = '''pixelshuffle_aux'''
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
UpperCAmelCase_ : Any = [6, 6, 6, 6]
UpperCAmelCase_ : int = 6_0
UpperCAmelCase_ : int = [6, 6, 6, 6]
UpperCAmelCase_ : Any = '''pixelshuffledirect'''
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
UpperCAmelCase_ : Dict = 4
UpperCAmelCase_ : Any = '''nearest+conv'''
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
UpperCAmelCase_ : Union[str, Any] = 1
UpperCAmelCase_ : str = 1
UpperCAmelCase_ : Any = 1_2_6
UpperCAmelCase_ : Tuple = 7
UpperCAmelCase_ : Tuple = 2_5_5.0
UpperCAmelCase_ : Optional[Any] = ''''''
return config
def snake_case_ ( __lowercase , __lowercase ):
if "patch_embed.proj" in name and "layers" not in name:
UpperCAmelCase_ : Optional[int] = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
UpperCAmelCase_ : List[str] = name.replace('''patch_embed.norm''' , '''embeddings.patch_embeddings.layernorm''' )
if "layers" in name:
UpperCAmelCase_ : int = name.replace('''layers''' , '''encoder.stages''' )
if "residual_group.blocks" in name:
UpperCAmelCase_ : List[str] = name.replace('''residual_group.blocks''' , '''layers''' )
if "attn.proj" in name:
UpperCAmelCase_ : Tuple = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
UpperCAmelCase_ : Any = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
UpperCAmelCase_ : Tuple = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
UpperCAmelCase_ : Dict = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
UpperCAmelCase_ : Tuple = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
UpperCAmelCase_ : Any = name.replace('''mlp.fc2''' , '''output.dense''' )
if "q_bias" in name:
UpperCAmelCase_ : Any = name.replace('''q_bias''' , '''query.bias''' )
if "k_bias" in name:
UpperCAmelCase_ : str = name.replace('''k_bias''' , '''key.bias''' )
if "v_bias" in name:
UpperCAmelCase_ : Optional[Any] = name.replace('''v_bias''' , '''value.bias''' )
if "cpb_mlp" in name:
UpperCAmelCase_ : List[str] = name.replace('''cpb_mlp''' , '''continuous_position_bias_mlp''' )
if "patch_embed.proj" in name:
UpperCAmelCase_ : Optional[int] = name.replace('''patch_embed.proj''' , '''patch_embed.projection''' )
if name == "norm.weight":
UpperCAmelCase_ : int = '''layernorm.weight'''
if name == "norm.bias":
UpperCAmelCase_ : int = '''layernorm.bias'''
if "conv_first" in name:
UpperCAmelCase_ : Tuple = name.replace('''conv_first''' , '''first_convolution''' )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
UpperCAmelCase_ : Optional[int] = name.replace('''conv_last''' , '''final_convolution''' )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
UpperCAmelCase_ : Union[str, Any] = name.replace('''conv_before_upsample.0''' , '''conv_before_upsample''' )
if "upsample.0" in name:
UpperCAmelCase_ : Tuple = name.replace('''upsample.0''' , '''upsample.convolution_0''' )
if "upsample.2" in name:
UpperCAmelCase_ : Any = name.replace('''upsample.2''' , '''upsample.convolution_1''' )
UpperCAmelCase_ : Dict = '''upsample.''' + name
elif config.upsampler == "pixelshuffledirect":
UpperCAmelCase_ : Tuple = name.replace('''upsample.0.weight''' , '''upsample.conv.weight''' )
UpperCAmelCase_ : str = name.replace('''upsample.0.bias''' , '''upsample.conv.bias''' )
else:
UpperCAmelCase_ : List[Any] = '''swin2sr.''' + name
return name
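# Hedged trace of the renaming above for one checkpoint key:
#   "layers.0.residual_group.blocks.1.attn.proj.weight"
#   -> "encoder.stages.0.residual_group.blocks.1.attn.proj.weight"   (layers)
#   -> "encoder.stages.0.layers.1.attn.proj.weight"                  (residual_group.blocks)
#   -> "encoder.stages.0.layers.1.attention.output.dense.weight"     (attn.proj)
#   -> "swin2sr.encoder.stages.0.layers.1.attention.output.dense.weight"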
def snake_case_ ( __lowercase , __lowercase ):
for key in orig_state_dict.copy().keys():
UpperCAmelCase_ : List[Any] = orig_state_dict.pop(__lowercase )
if "qkv" in key:
UpperCAmelCase_ : str = key.split('''.''' )
UpperCAmelCase_ : Dict = int(key_split[1] )
UpperCAmelCase_ : List[Any] = int(key_split[4] )
UpperCAmelCase_ : List[Any] = config.embed_dim
if "weight" in key:
UpperCAmelCase_ : Union[str, Any] = val[:dim, :]
UpperCAmelCase_ : Union[str, Any] = val[dim : dim * 2, :]
UpperCAmelCase_ : Dict = val[-dim:, :]
else:
UpperCAmelCase_ : Optional[int] = val[:dim]
UpperCAmelCase_ : int = val[dim : dim * 2]
UpperCAmelCase_ : List[Any] = val[-dim:]
else:
UpperCAmelCase_ : List[Any] = val
return orig_state_dict
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : int = get_config(__lowercase )
UpperCAmelCase_ : Optional[int] = SwinaSRForImageSuperResolution(__lowercase )
model.eval()
UpperCAmelCase_ : Tuple = torch.hub.load_state_dict_from_url(__lowercase , map_location='''cpu''' )
UpperCAmelCase_ : Tuple = convert_state_dict(__lowercase , __lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = model.load_state_dict(__lowercase , strict=__lowercase )
if len(__lowercase ) > 0:
raise ValueError('''Missing keys when converting: {}'''.format(__lowercase ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(F'''Unexpected key {key} in state_dict''' )
# verify values
UpperCAmelCase_ : List[Any] = '''https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'''
UpperCAmelCase_ : Union[str, Any] = Image.open(requests.get(__lowercase , stream=__lowercase ).raw ).convert('''RGB''' )
UpperCAmelCase_ : Optional[int] = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
UpperCAmelCase_ : List[Any] = 1_2_6 if '''Jpeg''' in checkpoint_url else 2_5_6
UpperCAmelCase_ : int = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
] )
UpperCAmelCase_ : Tuple = transforms(__lowercase ).unsqueeze(0 )
if config.num_channels == 1:
UpperCAmelCase_ : Optional[int] = pixel_values[:, 0, :, :].unsqueeze(1 )
UpperCAmelCase_ : List[Any] = model(__lowercase )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
UpperCAmelCase_ : List[Any] = torch.Size([1, 3, 5_1_2, 5_1_2] )
UpperCAmelCase_ : int = torch.tensor(
[[-0.7_0_8_7, -0.7_1_3_8, -0.6_7_2_1], [-0.8_3_4_0, -0.8_0_9_5, -0.7_2_9_8], [-0.9_1_4_9, -0.8_4_1_4, -0.7_9_4_0]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
UpperCAmelCase_ : Optional[Any] = torch.Size([1, 3, 1_0_2_4, 1_0_2_4] )
UpperCAmelCase_ : Union[str, Any] = torch.tensor(
[[-0.7_7_7_5, -0.8_1_0_5, -0.8_9_3_3], [-0.7_7_6_4, -0.8_3_5_6, -0.9_2_2_5], [-0.7_9_7_6, -0.8_6_8_6, -0.9_5_7_9]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
UpperCAmelCase_ : int = torch.Size([1, 3, 1_0_2_4, 1_0_2_4] )
UpperCAmelCase_ : Any = torch.tensor(
[[-0.8_0_3_5, -0.7_5_0_4, -0.7_4_9_1], [-0.8_5_3_8, -0.8_1_2_4, -0.7_7_8_2], [-0.8_8_0_4, -0.8_6_5_1, -0.8_4_9_3]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
UpperCAmelCase_ : Optional[int] = torch.Size([1, 3, 5_1_2, 5_1_2] )
UpperCAmelCase_ : Tuple = torch.tensor(
[[-0.7_6_6_9, -0.8_6_6_2, -0.8_7_6_7], [-0.8_8_1_0, -0.9_9_6_2, -0.9_8_2_0], [-0.9_3_4_0, -1.0_3_2_2, -1.1_1_4_9]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
UpperCAmelCase_ : Optional[Any] = torch.Size([1, 3, 1_0_2_4, 1_0_2_4] )
UpperCAmelCase_ : Dict = torch.tensor(
[[-0.5_2_3_8, -0.5_5_5_7, -0.6_3_2_1], [-0.6_0_1_6, -0.5_9_0_3, -0.6_3_9_1], [-0.6_2_4_4, -0.6_3_3_4, -0.6_8_8_9]] )
assert (
outputs.reconstruction.shape == expected_shape
), F'''Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'''
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , __lowercase , atol=1e-3 )
print('''Looks ok!''' )
UpperCAmelCase_ : Any = {
'''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''': (
'''swin2SR-classical-sr-x2-64'''
),
'''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth''': (
'''swin2SR-classical-sr-x4-64'''
),
'''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth''': (
'''swin2SR-compressed-sr-x4-48'''
),
'''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth''': (
'''swin2SR-lightweight-x2-64'''
),
'''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth''': (
'''swin2SR-realworld-sr-x4-64-bsrgan-psnr'''
),
}
UpperCAmelCase_ : List[Any] = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__lowercase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(__lowercase )
if push_to_hub:
model.push_to_hub(F'''caidas/{model_name}''' )
processor.push_to_hub(F'''caidas/{model_name}''' )
if __name__ == "__main__":
__UpperCamelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth',
type=str,
help='URL of the original Swin2SR checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the converted model to the hub.')
__UpperCamelCase : Any = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 641
|
def snake_case_ ( __lowercase ):
return " ".join(
''''''.join(word[::-1] ) if len(__lowercase ) > 4 else word for word in sentence.split() )
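# e.g. "Hey wollef sroirraw" -> "Hey fellow warriors": only words longer than
# four characters are reversed; shorter ones pass through unchanged.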
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('Hey wollef sroirraw'))
__UpperCamelCase : str = '0.21.0'
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__UpperCamelCase : str = 'true'
def snake_case_ ( __lowercase , __lowercase=8_2 , __lowercase=1_6 ):
set_seed(4_2 )
UpperCAmelCase_ : Optional[int] = RegressionModel()
UpperCAmelCase_ : Optional[int] = deepcopy(__lowercase )
UpperCAmelCase_ : Union[str, Any] = RegressionDataset(length=__lowercase )
UpperCAmelCase_ : Any = DataLoader(__lowercase , batch_size=__lowercase )
model.to(accelerator.device )
UpperCAmelCase_ , UpperCAmelCase_ : Dict = accelerator.prepare(__lowercase , __lowercase )
return model, ddp_model, dataloader
def snake_case_ ( __lowercase , __lowercase=False ):
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''' )
UpperCAmelCase_ : List[Any] = load_dataset('''glue''' , '''mrpc''' , split='''validation''' )
def tokenize_function(__lowercase ):
UpperCAmelCase_ : int = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__lowercase , max_length=__lowercase )
return outputs
with accelerator.main_process_first():
UpperCAmelCase_ : List[str] = dataset.map(
__lowercase , batched=__lowercase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
UpperCAmelCase_ : Any = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__lowercase ):
if use_longest:
return tokenizer.pad(__lowercase , padding='''longest''' , return_tensors='''pt''' )
return tokenizer.pad(__lowercase , padding='''max_length''' , max_length=1_2_8 , return_tensors='''pt''' )
return DataLoader(__lowercase , shuffle=__lowercase , collate_fn=__lowercase , batch_size=1_6 )
def snake_case_ ( __lowercase , __lowercase ):
UpperCAmelCase_ : Optional[int] = Accelerator(dispatch_batches=__lowercase , split_batches=__lowercase )
UpperCAmelCase_ : int = get_dataloader(__lowercase , not dispatch_batches )
UpperCAmelCase_ : Optional[int] = AutoModelForSequenceClassification.from_pretrained(
'''hf-internal-testing/mrpc-bert-base-cased''' , return_dict=__lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(__lowercase , __lowercase )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : Dict = []
for batch in dataloader:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = batch.values()
with torch.no_grad():
UpperCAmelCase_ : List[Any] = model(__lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Dict = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
UpperCAmelCase_ , UpperCAmelCase_ : Any = [], []
for logit, targ in logits_and_targets:
logits.append(__lowercase )
targs.append(__lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = torch.cat(__lowercase ), torch.cat(__lowercase )
return logits, targs
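# Hedged note: `gather_for_metrics` differs from a plain `gather` in that it
# drops the duplicate samples DDP appends to pad the final batch, so the
# concatenated logits/targets match the true dataset length.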
def snake_case_ ( __lowercase , __lowercase=8_2 , __lowercase=False , __lowercase=False , __lowercase=1_6 ):
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = get_basic_setup(__lowercase , __lowercase , __lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = generate_predictions(__lowercase , __lowercase , __lowercase )
assert (
len(__lowercase ) == num_samples
), F'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(__lowercase )}'''
def snake_case_ ( __lowercase = False , __lowercase = False ):
UpperCAmelCase_ : Optional[Any] = evaluate.load('''glue''' , '''mrpc''' )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = get_mrpc_setup(__lowercase , __lowercase )
# First do baseline
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = setup['''no''']
model.to(__lowercase )
model.eval()
for batch in dataloader:
batch.to(__lowercase )
with torch.inference_mode():
UpperCAmelCase_ : str = model(**__lowercase )
UpperCAmelCase_ : Dict = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=__lowercase , references=batch['''labels'''] )
UpperCAmelCase_ : Optional[int] = metric.compute()
# Then do distributed
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = setup['''ddp''']
model.eval()
for batch in dataloader:
with torch.inference_mode():
UpperCAmelCase_ : Optional[int] = model(**__lowercase )
UpperCAmelCase_ : int = outputs.logits.argmax(dim=-1 )
UpperCAmelCase_ : Optional[int] = batch['''labels''']
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=__lowercase , references=__lowercase )
UpperCAmelCase_ : Dict = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower, so they should only be run on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
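    # Note: this script is intended to be launched across processes, e.g. with
    # `accelerate launch <this_file>.py` (the file name here is an assumption);
    # invoking it directly exercises only the single-process path.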
def knapsack(weights, values, number_of_items, max_weight, index):
    """
    Recursive 0/1 knapsack: at each item, take the better of skipping the item or
    taking it (allowed only if it still fits within the remaining capacity).

    >>> knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0)
    13
    >>> knapsack([3, 4, 5], [10, 9, 8], 3, 25, 0)
    27
    """
    if index == number_of_items:
        return 0
    skip_item = knapsack(weights, values, number_of_items, max_weight, index + 1)
    take_item = 0
    if weights[index] <= max_weight:
        take_item = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(skip_item, take_item)
if __name__ == "__main__":
import doctest
doctest.testmod()
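    # A quick usage sketch (values are made up for illustration; argument order
    # follows the signature above): with capacity 15 the best choice is the items
    # of weight 3, 5, and 7, for a total value of 22.
    example_weights = [1, 3, 5, 7]
    example_values = [1, 5, 8, 9]
    print(knapsack(example_weights, example_values, len(example_weights), 15, 0))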
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
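# Usage sketch (comments only, since this file is a package __init__): assuming this
# package lives at `transformers.models.vivit`, `from transformers.models.vivit import
# VivitConfig` resolves lazily; heavy submodules such as `modeling_vivit` are imported
# only on first attribute access, thanks to the `_LazyModule` indirection above.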
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class EncoderDecoderConfig(PretrainedConfig):
    """Configuration class that nests an encoder config and a decoder config."""

    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs):
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        # Serialize the nested configs explicitly so round-tripping works.
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
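# A hedged usage sketch (`bert-base-uncased` is just an example checkpoint name):
#
#   from transformers import AutoConfig
#   encoder = AutoConfig.from_pretrained("bert-base-uncased")
#   decoder = AutoConfig.from_pretrained("bert-base-uncased")
#   config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder, decoder)
#   assert config.decoder.is_decoder and config.decoder.add_cross_attention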
import argparse
import hashlib  # used only for self-tests against the reference implementation
import struct
class SHA1Hash:
    """Pure-Python SHA-1, kept for teaching purposes; use hashlib in real code."""

    def __init__(self, data):
        self.data = data
        # Initial hash state as defined by the SHA-1 specification.
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        # 32-bit left rotation.
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        # Append 0x80, zero-pad to 56 mod 64 bytes, then the 64-bit message length.
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        # Expand 16 big-endian words into the 80-word message schedule.
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                # The round function f and constant k change every 20 rounds.
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)
def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324
def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case, the hash input should be a bytestring.
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
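    # An extra hedged self-check beyond main(): compare this implementation against
    # hashlib's reference SHA-1 on a sample bytestring chosen for illustration.
    sample = b"hello"
    assert SHA1Hash(sample).final_hash() == hashlib.sha1(sample).hexdigest()  # noqa: S324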
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef

import datasets


_CITATION = """\
@inproceedings{wang2019glue,
  title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
  author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
  note={In the Proceedings of ICLR.},
  year={2019}
}
"""

_DESCRIPTION = """\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
"""

_KWARGS_DESCRIPTION = """
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
    predictions: list of predictions to score.
        Each translation should be tokenized into a list of tokens.
    references: list of lists of references for each translation.
        Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
    "accuracy": Accuracy
    "f1": F1 score
    "pearson": Pearson Correlation
    "spearmanr": Spearman Correlation
    "matthews_correlation": Matthew Correlation
Examples:

    >>> glue_metric = datasets.load_metric('glue', 'sst2')  # 'sst2' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}

    >>> glue_metric = datasets.load_metric('glue', 'mrpc')  # 'mrpc' or 'qqp'
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0, 'f1': 1.0}

    >>> glue_metric = datasets.load_metric('glue', 'stsb')
    >>> references = [0., 1., 2., 3., 4., 5.]
    >>> predictions = [0., 1., 2., 3., 4., 5.]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
    {'pearson': 1.0, 'spearmanr': 1.0}

    >>> glue_metric = datasets.load_metric('glue', 'cola')
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'matthews_correlation': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
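# Quick sanity sketch for the helpers above (illustrative inputs, not part of the
# metric API):
#
#   import numpy as np
#   simple_accuracy(np.array([0, 1, 1]), np.array([0, 1, 0]))   # -> ~0.667
#   pearson_and_spearman([0.0, 1.0, 2.0], [0.0, 1.0, 2.0])      # -> both 1.0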
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Glue(datasets.Metric):
    def _info(self):
        if self.config_name not in [
            "sst2",
            "mnli",
            "mnli_mismatched",
            "mnli_matched",
            "cola",
            "stsb",
            "mrpc",
            "qqp",
            "qnli",
            "rte",
            "wnli",
            "hans",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "stsb" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "stsb" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}
class TimesformerConfig(PretrainedConfig):
    """Configuration for the TimeSformer video classification model."""

    model_type = "timesformer"

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
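# A hedged usage sketch: instantiate the default configuration and override one field.
#
#   config = TimesformerConfig(num_frames=16)
#   assert config.attention_type == "divided_space_time"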