"""simple docstring"""
from __future__ import annotations
__UpperCamelCase = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
class UpperCamelCase :
def __init__( self, lowerCAmelCase__, lowerCAmelCase__) -> None:
snake_case_ = graph
# mapping node to its parent in resulting breadth first tree
snake_case_ = {}
snake_case_ = source_vertex
def a_ ( self) -> None:
snake_case_ = {self.source_vertex}
snake_case_ = None
snake_case_ = [self.source_vertex] # first in first out queue
while queue:
snake_case_ = queue.pop(0)
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(lowerCAmelCase__)
snake_case_ = vertex
queue.append(lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__) -> str:
if target_vertex == self.source_vertex:
return self.source_vertex
snake_case_ = self.parent.get(lowerCAmelCase__)
if target_vertex_parent is None:
snake_case_ = (
f'No path from vertex: {self.source_vertex} to vertex: {target_vertex}'
)
raise ValueError(lowerCAmelCase__)
return self.shortest_path(lowerCAmelCase__) + f'->{target_vertex}'
if __name__ == "__main__":
__UpperCamelCase = Graph(graph, '''G''')
g.breath_first_search()
print(g.shortest_path('''D'''))
print(g.shortest_path('''G'''))
print(g.shortest_path('''Foo'''))
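
# Example run (added for illustration, not part of the original file): with source
# vertex "G", the parent map built by breadth_first_search() gives
#     g.shortest_path("D")  ->  "G->C->A->B->D"
#     g.shortest_path("G")  ->  "G"
# and g.shortest_path("Foo") raises ValueError, since "Foo" is never reached.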
"""simple docstring"""
from __future__ import annotations
import math
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int:
if depth < 0:
raise ValueError('Depth cannot be less than 0' )
if len(UpperCAmelCase ) == 0:
raise ValueError('Scores cannot be empty' )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , )
return min(
minimax(depth + 1 , node_index * 2 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , )
def UpperCAmelCase ( ) -> None:
snake_case_ = [90, 23, 6, 33, 21, 65, 123, 34423]
snake_case_ = math.log(len(UpperCAmelCase ) , 2 )
print('Optimal value : ' , end='' )
print(minimax(0 , 0 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
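
# Worked example (added for illustration): with the 8 scores above, height = log2(8) = 3,
# so the leaves of the complete binary game tree are the scores themselves. The
# maximizer-moves-first value works out to
#     max(min(max(90, 23), max(6, 33)), min(max(21, 65), max(123, 34423)))
#   = max(min(90, 33), min(65, 34423)) = max(33, 65) = 65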
import warnings
from typing import List

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available


class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
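
# Minimal usage sketch (an illustration, not part of the original file; assumes the
# public "google/owlvit-base-patch32" checkpoint and a reachable COCO image URL):
#
#     from transformers import OwlViTProcessor
#     from PIL import Image
#     import requests
#
#     processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#     url = "http://images.cocodataset.org/val2017/000000039769.jpg"
#     image = Image.open(requests.get(url, stream=True).raw)
#     inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
#     # inputs now holds input_ids, attention_mask and pixel_values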
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=1 ) -> Optional[Any]:
if n_shave_prefix_segments >= 0:
return ".".join(path.split('.' )[n_shave_prefix_segments:] )
else:
return ".".join(path.split('.' )[:n_shave_prefix_segments] )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=0 ) -> Dict:
snake_case_ = []
for old_item in old_list:
snake_case_ = old_item.replace('in_layers.0' , 'norm1' )
snake_case_ = new_item.replace('in_layers.2' , 'conv1' )
snake_case_ = new_item.replace('out_layers.0' , 'norm2' )
snake_case_ = new_item.replace('out_layers.3' , 'conv2' )
snake_case_ = new_item.replace('emb_layers.1' , 'time_emb_proj' )
snake_case_ = new_item.replace('skip_connection' , 'conv_shortcut' )
snake_case_ = shave_segments(UpperCAmelCase , n_shave_prefix_segments=UpperCAmelCase )
mapping.append({'old': old_item, 'new': new_item} )
return mapping
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=0 ) -> Union[str, Any]:
snake_case_ = []
for old_item in old_list:
snake_case_ = old_item
snake_case_ = new_item.replace('norm.weight' , 'group_norm.weight' )
snake_case_ = new_item.replace('norm.bias' , 'group_norm.bias' )
snake_case_ = new_item.replace('proj_out.weight' , 'proj_attn.weight' )
snake_case_ = new_item.replace('proj_out.bias' , 'proj_attn.bias' )
snake_case_ = shave_segments(UpperCAmelCase , n_shave_prefix_segments=UpperCAmelCase )
mapping.append({'old': old_item, 'new': new_item} )
return mapping
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None ) -> Optional[Any]:
assert isinstance(UpperCAmelCase , UpperCAmelCase ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
snake_case_ = old_checkpoint[path]
snake_case_ = old_tensor.shape[0] // 3
snake_case_ = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
snake_case_ = old_tensor.shape[0] // config['num_head_channels'] // 3
snake_case_ = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
snake_case_ , snake_case_ , snake_case_ = old_tensor.split(channels // num_heads , dim=1 )
snake_case_ = query.reshape(UpperCAmelCase )
snake_case_ = key.reshape(UpperCAmelCase )
snake_case_ = value.reshape(UpperCAmelCase )
for path in paths:
snake_case_ = path['new']
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
snake_case_ = new_path.replace('middle_block.0' , 'mid_block.resnets.0' )
snake_case_ = new_path.replace('middle_block.1' , 'mid_block.attentions.0' )
snake_case_ = new_path.replace('middle_block.2' , 'mid_block.resnets.1' )
if additional_replacements is not None:
for replacement in additional_replacements:
snake_case_ = new_path.replace(replacement['old'] , replacement['new'] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
snake_case_ = old_checkpoint[path['old']][:, :, 0]
else:
snake_case_ = old_checkpoint[path['old']]
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[Any]:
snake_case_ = {}
snake_case_ = checkpoint['time_embed.0.weight']
snake_case_ = checkpoint['time_embed.0.bias']
snake_case_ = checkpoint['time_embed.2.weight']
snake_case_ = checkpoint['time_embed.2.bias']
snake_case_ = checkpoint['input_blocks.0.0.weight']
snake_case_ = checkpoint['input_blocks.0.0.bias']
snake_case_ = checkpoint['out.0.weight']
snake_case_ = checkpoint['out.0.bias']
snake_case_ = checkpoint['out.2.weight']
snake_case_ = checkpoint['out.2.bias']
# Retrieves the keys for the input blocks only
snake_case_ = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'input_blocks' in layer} )
snake_case_ = {
layer_id: [key for key in checkpoint if f'input_blocks.{layer_id}' in key]
for layer_id in range(UpperCAmelCase )
}
# Retrieves the keys for the middle blocks only
snake_case_ = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'middle_block' in layer} )
snake_case_ = {
layer_id: [key for key in checkpoint if f'middle_block.{layer_id}' in key]
for layer_id in range(UpperCAmelCase )
}
# Retrieves the keys for the output blocks only
snake_case_ = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'output_blocks' in layer} )
snake_case_ = {
layer_id: [key for key in checkpoint if f'output_blocks.{layer_id}' in key]
for layer_id in range(UpperCAmelCase )
}
for i in range(1 , UpperCAmelCase ):
snake_case_ = (i - 1) // (config['num_res_blocks'] + 1)
snake_case_ = (i - 1) % (config['num_res_blocks'] + 1)
snake_case_ = [key for key in input_blocks[i] if f'input_blocks.{i}.0' in key]
snake_case_ = [key for key in input_blocks[i] if f'input_blocks.{i}.1' in key]
if f'input_blocks.{i}.0.op.weight' in checkpoint:
snake_case_ = checkpoint[
f'input_blocks.{i}.0.op.weight'
]
snake_case_ = checkpoint[
f'input_blocks.{i}.0.op.bias'
]
continue
snake_case_ = renew_resnet_paths(UpperCAmelCase )
snake_case_ = {'old': f'input_blocks.{i}.0', 'new': f'down_blocks.{block_id}.resnets.{layer_in_block_id}'}
snake_case_ = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'}
assign_to_checkpoint(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path, resnet_op] , config=UpperCAmelCase )
if len(UpperCAmelCase ):
snake_case_ = renew_attention_paths(UpperCAmelCase )
snake_case_ = {
'old': f'input_blocks.{i}.1',
'new': f'down_blocks.{block_id}.attentions.{layer_in_block_id}',
}
snake_case_ = {
f'input_blocks.{i}.1.qkv.bias': {
'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
},
f'input_blocks.{i}.1.qkv.weight': {
'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
},
}
assign_to_checkpoint(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path] , attention_paths_to_split=UpperCAmelCase , config=UpperCAmelCase , )
snake_case_ = middle_blocks[0]
snake_case_ = middle_blocks[1]
snake_case_ = middle_blocks[2]
snake_case_ = renew_resnet_paths(UpperCAmelCase )
assign_to_checkpoint(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , config=UpperCAmelCase )
snake_case_ = renew_resnet_paths(UpperCAmelCase )
assign_to_checkpoint(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , config=UpperCAmelCase )
snake_case_ = renew_attention_paths(UpperCAmelCase )
snake_case_ = {
'middle_block.1.qkv.bias': {
'key': 'mid_block.attentions.0.key.bias',
'query': 'mid_block.attentions.0.query.bias',
'value': 'mid_block.attentions.0.value.bias',
},
'middle_block.1.qkv.weight': {
'key': 'mid_block.attentions.0.key.weight',
'query': 'mid_block.attentions.0.query.weight',
'value': 'mid_block.attentions.0.value.weight',
},
}
assign_to_checkpoint(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , attention_paths_to_split=UpperCAmelCase , config=UpperCAmelCase )
for i in range(UpperCAmelCase ):
snake_case_ = i // (config['num_res_blocks'] + 1)
snake_case_ = i % (config['num_res_blocks'] + 1)
snake_case_ = [shave_segments(UpperCAmelCase , 2 ) for name in output_blocks[i]]
snake_case_ = {}
for layer in output_block_layers:
snake_case_ , snake_case_ = layer.split('.' )[0], shave_segments(UpperCAmelCase , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(UpperCAmelCase )
else:
snake_case_ = [layer_name]
if len(UpperCAmelCase ) > 1:
snake_case_ = [key for key in output_blocks[i] if f'output_blocks.{i}.0' in key]
snake_case_ = [key for key in output_blocks[i] if f'output_blocks.{i}.1' in key]
snake_case_ = renew_resnet_paths(UpperCAmelCase )
snake_case_ = renew_resnet_paths(UpperCAmelCase )
snake_case_ = {'old': f'output_blocks.{i}.0', 'new': f'up_blocks.{block_id}.resnets.{layer_in_block_id}'}
assign_to_checkpoint(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path] , config=UpperCAmelCase )
if ["conv.weight", "conv.bias"] in output_block_list.values():
snake_case_ = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] )
snake_case_ = checkpoint[
f'output_blocks.{i}.{index}.conv.weight'
]
snake_case_ = checkpoint[
f'output_blocks.{i}.{index}.conv.bias'
]
# Clear attentions as they have been attributed above.
if len(UpperCAmelCase ) == 2:
snake_case_ = []
if len(UpperCAmelCase ):
snake_case_ = renew_attention_paths(UpperCAmelCase )
snake_case_ = {
'old': f'output_blocks.{i}.1',
'new': f'up_blocks.{block_id}.attentions.{layer_in_block_id}',
}
snake_case_ = {
f'output_blocks.{i}.1.qkv.bias': {
'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
},
f'output_blocks.{i}.1.qkv.weight': {
'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
},
}
assign_to_checkpoint(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None , config=UpperCAmelCase , )
else:
snake_case_ = renew_resnet_paths(UpperCAmelCase , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
snake_case_ = '.'.join(['output_blocks', str(UpperCAmelCase ), path['old']] )
snake_case_ = '.'.join(['up_blocks', str(UpperCAmelCase ), 'resnets', str(UpperCAmelCase ), path['new']] )
snake_case_ = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
__UpperCamelCase = parser.parse_args()
__UpperCamelCase = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
__UpperCamelCase = json.loads(f.read())
__UpperCamelCase = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
__UpperCamelCase = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
__UpperCamelCase = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
__UpperCamelCase = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
__UpperCamelCase = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
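
# Example invocation (a sketch; the script filename and paths are hypothetical):
#     python convert_ldm_checkpoint.py \
#         --checkpoint_path ./model.ckpt \
#         --config_file ./config.json \
#         --dump_path ./ldm-converted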
import argparse
import math
import os
from copy import deepcopy

import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn

from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel


MODELS_MAP = {
    "gwf-440k": {
        "url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
        "sample_rate": 48000,
        "sample_size": 65536,
    },
    "jmann-small-190k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
        "sample_rate": 48000,
        "sample_size": 65536,
    },
    "jmann-large-580k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
        "sample_rate": 48000,
        "sample_size": 131072,
    },
    "maestro-uncond-150k": {
        "url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
    "unlocked-uncond-250k": {
        "url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
    "honk-140k": {
        "url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
}


def alpha_sigma_to_t(alpha, sigma):
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)


class Object(object):
    pass


class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()

        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)


def download(model_name):
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")
    return f"./{model_name}.ckpt"


DOWN_NUM_TO_LAYER = {
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
}
UP_NUM_TO_LAYER = {
    "8": "resnets.0",
    "9": "attentions.0",
    "10": "resnets.1",
    "11": "attentions.1",
    "12": "resnets.2",
    "13": "attentions.2",
}
MID_NUM_TO_LAYER = {
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
    "8": "resnets.3",
    "9": "attentions.3",
    "10": "resnets.4",
    "11": "attentions.4",
    "12": "resnets.5",
    "13": "attentions.5",
}
DEPTH_0_TO_LAYER = {
    "0": "resnets.0",
    "1": "resnets.1",
    "2": "resnets.2",
    "4": "resnets.0",
    "5": "resnets.1",
    "6": "resnets.2",
}

RES_CONV_MAP = {
    "skip": "conv_skip",
    "main.0": "conv_1",
    "main.1": "group_norm_1",
    "main.3": "conv_2",
    "main.4": "group_norm_2",
}

ATTN_MAP = {
    "norm": "group_norm",
    "qkv_proj": ["query", "key", "value"],
    "out_proj": ["proj_attn"],
}


def convert_resconv_naming(name):
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])

    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")

    return name.replace(name[:6], RES_CONV_MAP[name[:6]])


def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")


def rename(input_string, max_depth=13):
    string = input_string

    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")

    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]

    while string.startswith("main.7."):
        depth += 1
        string = string[7:]

    if string.startswith("main."):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"

    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")

    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left)
        string_left = new_string_left

    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string


def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue

        new_k = rename(k)

        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v

    return new_state_dict


def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict


def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()

        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33

    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps)

    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=diffusers_scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)

    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()

    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print("Diff sum", diff_sum)
    print("Diff max", diff_max)
    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"

    print(f"Conversion for {model_name} successful!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument(
        "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()

    main(args)
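
# Example invocation (a sketch; the script filename is hypothetical, but "gwf-440k"
# is one of the official names in MODELS_MAP above, so the checkpoint is downloaded
# automatically when no local file with that path exists):
#     python convert_dance_diffusion_to_diffusers.py \
#         --model_path gwf-440k \
#         --checkpoint_path ./dance-diffusion-gwf-440k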
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def UpperCAmelCase ( UpperCAmelCase ) -> Dict:
# vision encoder
if "img_encoder.pos_embed" in name:
snake_case_ = name.replace('img_encoder.pos_embed' , 'vision_model.embeddings.position_embeddings' )
if "img_encoder.patch_embed.proj" in name:
snake_case_ = name.replace('img_encoder.patch_embed.proj' , 'vision_model.embeddings.patch_embeddings.projection' )
if "img_encoder.patch_embed.norm" in name:
snake_case_ = name.replace('img_encoder.patch_embed.norm' , 'vision_model.embeddings.layernorm' )
if "img_encoder.layers" in name:
snake_case_ = name.replace('img_encoder.layers' , 'vision_model.encoder.stages' )
if "blocks" in name and "res" not in name:
snake_case_ = name.replace('blocks' , 'layers' )
if "attn" in name and "pre_assign" not in name:
snake_case_ = name.replace('attn' , 'self_attn' )
if "proj" in name and "self_attn" in name and "text" not in name:
snake_case_ = name.replace('proj' , 'out_proj' )
if "pre_assign_attn.attn.proj" in name:
snake_case_ = name.replace('pre_assign_attn.attn.proj' , 'pre_assign_attn.attn.out_proj' )
if "norm1" in name:
snake_case_ = name.replace('norm1' , 'layer_norm1' )
if "norm2" in name and "pre_assign" not in name:
snake_case_ = name.replace('norm2' , 'layer_norm2' )
if "img_encoder.norm" in name:
snake_case_ = name.replace('img_encoder.norm' , 'vision_model.layernorm' )
# text encoder
if "text_encoder.token_embedding" in name:
snake_case_ = name.replace('text_encoder.token_embedding' , 'text_model.embeddings.token_embedding' )
if "text_encoder.positional_embedding" in name:
snake_case_ = name.replace('text_encoder.positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
if "text_encoder.transformer.resblocks." in name:
snake_case_ = name.replace('text_encoder.transformer.resblocks.' , 'text_model.encoder.layers.' )
if "ln_1" in name:
snake_case_ = name.replace('ln_1' , 'layer_norm1' )
if "ln_2" in name:
snake_case_ = name.replace('ln_2' , 'layer_norm2' )
if "c_fc" in name:
snake_case_ = name.replace('c_fc' , 'fc1' )
if "c_proj" in name:
snake_case_ = name.replace('c_proj' , 'fc2' )
if "text_encoder" in name:
snake_case_ = name.replace('text_encoder' , 'text_model' )
if "ln_final" in name:
snake_case_ = name.replace('ln_final' , 'final_layer_norm' )
# projection layers
if "img_projector.linear_hidden." in name:
snake_case_ = name.replace('img_projector.linear_hidden.' , 'visual_projection.' )
if "img_projector.linear_out." in name:
snake_case_ = name.replace('img_projector.linear_out.' , 'visual_projection.3.' )
if "text_projector.linear_hidden" in name:
snake_case_ = name.replace('text_projector.linear_hidden' , 'text_projection' )
if "text_projector.linear_out" in name:
snake_case_ = name.replace('text_projector.linear_out' , 'text_projection.3' )
return name
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
for key in orig_state_dict.copy().keys():
snake_case_ = orig_state_dict.pop(UpperCAmelCase )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
snake_case_ = key.split('.' )
snake_case_ , snake_case_ = int(key_split[2] ), int(key_split[4] )
snake_case_ = config.vision_config.hidden_size
if "weight" in key:
snake_case_ = val[:dim, :]
snake_case_ = val[dim : dim * 2, :]
snake_case_ = val[-dim:, :]
else:
snake_case_ = val[:dim]
snake_case_ = val[dim : dim * 2]
snake_case_ = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
snake_case_ = key.split('.' )
snake_case_ = int(key_split[3] )
snake_case_ = config.text_config.hidden_size
if "weight" in key:
snake_case_ = val[:dim, :]
snake_case_ = val[
dim : dim * 2, :
]
snake_case_ = val[-dim:, :]
else:
snake_case_ = val[:dim]
snake_case_ = val[dim : dim * 2]
snake_case_ = val[-dim:]
else:
snake_case_ = rename_key(UpperCAmelCase )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
snake_case_ = val.squeeze_()
else:
snake_case_ = val
return orig_state_dict
def UpperCAmelCase ( ) -> Any:
snake_case_ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
snake_case_ = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw )
return im
@torch.no_grad()
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase="groupvit-gcc-yfcc" , UpperCAmelCase=False ) -> int:
snake_case_ = GroupViTConfig()
snake_case_ = GroupViTModel(UpperCAmelCase ).eval()
snake_case_ = torch.load(UpperCAmelCase , map_location='cpu' )['model']
snake_case_ = convert_state_dict(UpperCAmelCase , UpperCAmelCase )
snake_case_ , snake_case_ = model.load_state_dict(UpperCAmelCase , strict=UpperCAmelCase )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(UpperCAmelCase ) == 0)
# verify result
snake_case_ = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32' )
snake_case_ = prepare_img()
snake_case_ = processor(text=['a photo of a cat', 'a photo of a dog'] , images=UpperCAmelCase , padding=UpperCAmelCase , return_tensors='pt' )
with torch.no_grad():
snake_case_ = model(**UpperCAmelCase )
if model_name == "groupvit-gcc-yfcc":
snake_case_ = torch.tensor([[13.3_523, 6.3_629]] )
elif model_name == "groupvit-gcc-redcaps":
snake_case_ = torch.tensor([[16.1_873, 8.6_230]] )
else:
raise ValueError(f'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image , UpperCAmelCase , atol=1e-3 )
processor.save_pretrained(UpperCAmelCase )
model.save_pretrained(UpperCAmelCase )
print('Successfully saved processor and model to' , UpperCAmelCase )
if push_to_hub:
print('Pushing to the hub...' )
processor.push_to_hub(UpperCAmelCase , organization='nielsr' )
model.push_to_hub(UpperCAmelCase , organization='nielsr' )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to dump the processor and PyTorch model.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to GroupViT checkpoint''')
parser.add_argument(
'''--model_name''',
default='''groupvit-gccy-fcc''',
type=str,
help='''Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.''',
)
__UpperCamelCase = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
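
# Example invocation (a sketch; script name and checkpoint path are hypothetical):
#     python convert_groupvit_checkpoint.py \
#         --checkpoint_path ./groupvit_checkpoint.pth \
#         --model_name groupvit-gcc-yfcc \
#         --pytorch_dump_folder_path ./groupvit-gcc-yfcc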
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
__UpperCamelCase = False
class UpperCamelCase ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
def a_ ( self) -> int:
snake_case_ = VersatileDiffusionImageVariationPipeline.from_pretrained('shi-labs/versatile-diffusion')
pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg')
snake_case_ = torch.manual_seed(0)
snake_case_ = pipe(
image=lowerCAmelCase__, generator=lowerCAmelCase__, guidance_scale=7.5, num_inference_steps=50, output_type='numpy', ).images
snake_case_ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
snake_case_ = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase = {'''configuration_mmbt''': ['''MMBTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ['''MMBTForClassification''', '''MMBTModel''', '''ModalEmbeddings''']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
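
# Usage note (an illustration; the exact public import path depends on the installed
# transformers version): _LazyModule makes attribute access drive the import, so the
# torch-backed modeling module is only loaded when first touched:
#     import transformers.models.deprecated.mmbt as mmbt
#     cfg = mmbt.MMBTConfig()       # cheap, no torch-dependent import yet
#     model_cls = mmbt.MMBTModel    # triggers the real modeling_mmbt import now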
"""simple docstring"""
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class UpperCamelCase :
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
def a_ ( self) -> "DownloadConfig":
return self.__class__(**{k: copy.deepcopy(lowerCAmelCase__) for k, v in self.__dict__.items()})
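
# Minimal usage sketch (not part of the original file; field names follow the
# reconstruction above):
#     config = DownloadConfig(cache_dir="/tmp/datasets_cache", max_retries=3)
#     config_copy = config.copy()   # deep copy; mutating it leaves `config` untouched
#     assert config_copy.max_retries == 3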
"""simple docstring"""
from __future__ import annotations
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> list[str]:
if partitions <= 0:
raise ValueError('partitions must be a positive number!' )
if partitions > number_of_bytes:
raise ValueError('partitions can not > number_of_bytes!' )
snake_case_ = number_of_bytes // partitions
snake_case_ = []
for i in range(UpperCAmelCase ):
snake_case_ = i * bytes_per_partition + 1
snake_case_ = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f'{start_bytes}-{end_bytes}' )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
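
# Worked example (added for illustration):
#     allocation_num(100, 4) -> ["1-25", "26-50", "51-75", "76-100"]
# The last partition absorbs any remainder, e.g.
#     allocation_num(10, 3)  -> ["1-3", "4-6", "7-10"]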
"""simple docstring"""
__UpperCamelCase = 256
# Modulus to hash a string
__UpperCamelCase = 100_0003
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> bool:
snake_case_ = len(UpperCAmelCase )
snake_case_ = len(UpperCAmelCase )
if p_len > t_len:
return False
snake_case_ = 0
snake_case_ = 0
snake_case_ = 1
# Calculating the hash of pattern and substring of text
for i in range(UpperCAmelCase ):
snake_case_ = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
snake_case_ = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
snake_case_ = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
snake_case_ = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def UpperCAmelCase ( ) -> None:
snake_case_ = 'abc1abc12'
snake_case_ = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
snake_case_ = 'alskfjaldsk23adsfabcabc'
assert rabin_karp(UpperCAmelCase , UpperCAmelCase ) and not rabin_karp(UpperCAmelCase , UpperCAmelCase )
# Test 2)
snake_case_ = 'ABABX'
snake_case_ = 'ABABZABABYABABX'
assert rabin_karp(UpperCAmelCase , UpperCAmelCase )
# Test 3)
snake_case_ = 'AAAB'
snake_case_ = 'ABAAAAAB'
assert rabin_karp(UpperCAmelCase , UpperCAmelCase )
# Test 4)
snake_case_ = 'abcdabcy'
snake_case_ = 'abcxabcdabxabcdabcdabcy'
assert rabin_karp(UpperCAmelCase , UpperCAmelCase )
# Test 5)
snake_case_ = 'Lü'
snake_case_ = 'Lüsai'
assert rabin_karp(UpperCAmelCase , UpperCAmelCase )
snake_case_ = 'Lue'
assert not rabin_karp(UpperCAmelCase , UpperCAmelCase )
print('Success.' )
if __name__ == "__main__":
test_rabin_karp()
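
# Worked example (added for illustration): with alphabet_size = 256 and
# modulus = 1000003, the hash of the pattern "ab" accumulates as
#     i=0: (ord("a") + 0 * 256)  % 1000003 = 97
#     i=1: (ord("b") + 97 * 256) % 1000003 = 24930
# The rolling update then drops ord(text[i]) * modulus_power from the window hash
# before shifting in the next character, so each window costs O(1) to rehash.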
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCamelCase = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
__UpperCamelCase = 25_0004
__UpperCamelCase = 25_0020
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = MBartTokenizer
SCREAMING_SNAKE_CASE_ = MBartTokenizerFast
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = True
def a_ ( self) -> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
snake_case_ = MBartTokenizer(lowerCAmelCase__, keep_accents=lowerCAmelCase__)
tokenizer.save_pretrained(self.tmpdirname)
def a_ ( self) -> Dict:
snake_case_ = MBartTokenizer(lowerCAmelCase__, keep_accents=lowerCAmelCase__)
snake_case_ = tokenizer.tokenize('This is a test')
self.assertListEqual(lowerCAmelCase__, ['▁This', '▁is', '▁a', '▁t', 'est'])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
snake_case_ = tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
lowerCAmelCase__, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
], )
snake_case_ = tokenizer.convert_tokens_to_ids(lowerCAmelCase__)
self.assertListEqual(
lowerCAmelCase__, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
], )
snake_case_ = tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(
lowerCAmelCase__, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
], )
def a_ ( self) -> Any:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
snake_case_ = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
snake_case_ = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__, **lowerCAmelCase__)
snake_case_ = self.tokenizer_class.from_pretrained(lowerCAmelCase__, **lowerCAmelCase__)
snake_case_ = tempfile.mkdtemp()
snake_case_ = tokenizer_r.save_pretrained(lowerCAmelCase__)
snake_case_ = tokenizer_p.save_pretrained(lowerCAmelCase__)
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files))
snake_case_ = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f)
self.assertSequenceEqual(lowerCAmelCase__, lowerCAmelCase__)
# Checks everything loads correctly in the same way
snake_case_ = tokenizer_r.from_pretrained(lowerCAmelCase__)
snake_case_ = tokenizer_p.from_pretrained(lowerCAmelCase__)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase__, lowerCAmelCase__))
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowerCAmelCase__)
# Save tokenizer rust, legacy_format=True
snake_case_ = tempfile.mkdtemp()
snake_case_ = tokenizer_r.save_pretrained(lowerCAmelCase__, legacy_format=lowerCAmelCase__)
snake_case_ = tokenizer_p.save_pretrained(lowerCAmelCase__)
# Checks it save with the same files
self.assertSequenceEqual(lowerCAmelCase__, lowerCAmelCase__)
# Checks everything loads correctly in the same way
snake_case_ = tokenizer_r.from_pretrained(lowerCAmelCase__)
snake_case_ = tokenizer_p.from_pretrained(lowerCAmelCase__)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase__, lowerCAmelCase__))
shutil.rmtree(lowerCAmelCase__)
# Save tokenizer rust, legacy_format=False
snake_case_ = tempfile.mkdtemp()
snake_case_ = tokenizer_r.save_pretrained(lowerCAmelCase__, legacy_format=lowerCAmelCase__)
snake_case_ = tokenizer_p.save_pretrained(lowerCAmelCase__)
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files))
# Checks everything loads correctly in the same way
snake_case_ = tokenizer_r.from_pretrained(lowerCAmelCase__)
snake_case_ = tokenizer_p.from_pretrained(lowerCAmelCase__)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase__, lowerCAmelCase__))
shutil.rmtree(lowerCAmelCase__)
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = "facebook/mbart-large-en-ro"
SCREAMING_SNAKE_CASE_ = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
SCREAMING_SNAKE_CASE_ = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
SCREAMING_SNAKE_CASE_ = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE]
@classmethod
def a_ ( cls) -> Tuple:
snake_case_ = MBartTokenizer.from_pretrained(
cls.checkpoint_name, src_lang='en_XX', tgt_lang='ro_RO')
snake_case_ = 1
return cls
def a_ ( self) -> str:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'], 25_0001)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'], 25_0004)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'], 25_0020)
def a_ ( self) -> Any:
snake_case_ = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
self.assertListEqual(self.expected_src_tokens, lowerCAmelCase__)
def a_ ( self) -> Any:
self.assertIn(lowerCAmelCase__, self.tokenizer.all_special_ids)
snake_case_ = [RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2]
snake_case_ = self.tokenizer.decode(lowerCAmelCase__, skip_special_tokens=lowerCAmelCase__)
snake_case_ = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=lowerCAmelCase__)
self.assertEqual(lowerCAmelCase__, lowerCAmelCase__)
self.assertNotIn(self.tokenizer.eos_token, lowerCAmelCase__)
def a_ ( self) -> str:
snake_case_ = ['this is gunna be a long sentence ' * 20]
assert isinstance(src_text[0], lowerCAmelCase__)
snake_case_ = 10
snake_case_ = self.tokenizer(lowerCAmelCase__, max_length=lowerCAmelCase__, truncation=lowerCAmelCase__).input_ids[0]
self.assertEqual(ids[-2], 2)
self.assertEqual(ids[-1], lowerCAmelCase__)
self.assertEqual(len(lowerCAmelCase__), lowerCAmelCase__)
def a_ ( self) -> str:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR']), [25_0026, 25_0001])
def a_ ( self) -> List[Any]:
snake_case_ = tempfile.mkdtemp()
snake_case_ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCAmelCase__)
snake_case_ = MBartTokenizer.from_pretrained(lowerCAmelCase__)
self.assertDictEqual(new_tok.fairseq_tokens_to_ids, lowerCAmelCase__)
@require_torch
def a_ ( self) -> int:
snake_case_ = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=lowerCAmelCase__, return_tensors='pt')
snake_case_ = shift_tokens_right(batch['labels'], self.tokenizer.pad_token_id)
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def a_ ( self) -> str:
snake_case_ = self.tokenizer(
self.src_text, text_target=self.tgt_text, padding=lowerCAmelCase__, truncation=lowerCAmelCase__, max_length=len(self.expected_src_tokens), return_tensors='pt', )
snake_case_ = shift_tokens_right(batch['labels'], self.tokenizer.pad_token_id)
self.assertIsInstance(lowerCAmelCase__, lowerCAmelCase__)
self.assertEqual((2, 14), batch.input_ids.shape)
self.assertEqual((2, 14), batch.attention_mask.shape)
snake_case_ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens, lowerCAmelCase__)
self.assertEqual(2, batch.decoder_input_ids[0, -1]) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens, [])
self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])
def a_ ( self) -> Optional[Any]:
snake_case_ = self.tokenizer(self.src_text, padding=lowerCAmelCase__, truncation=lowerCAmelCase__, max_length=3, return_tensors='pt')
snake_case_ = self.tokenizer(
text_target=self.tgt_text, padding=lowerCAmelCase__, truncation=lowerCAmelCase__, max_length=10, return_tensors='pt')
snake_case_ = targets['input_ids']
snake_case_ = shift_tokens_right(lowerCAmelCase__, self.tokenizer.pad_token_id)
self.assertEqual(batch.input_ids.shape[1], 3)
self.assertEqual(batch.decoder_input_ids.shape[1], 10)
@require_torch
def a_ ( self) -> Any:
snake_case_ = self.tokenizer._build_translation_inputs(
'A test', return_tensors='pt', src_lang='en_XX', tgt_lang='ar_AR')
self.assertEqual(
nested_simplify(lowerCAmelCase__), {
# A, test, EOS, en_XX
'input_ids': [[62, 3034, 2, 25_0004]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 25_0001,
}, )
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''',
}
class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = "resnet"
SCREAMING_SNAKE_CASE_ = ["basic", "bottleneck"]
def __init__( self, lowerCAmelCase__=3, lowerCAmelCase__=64, lowerCAmelCase__=[256, 512, 1024, 2048], lowerCAmelCase__=[3, 4, 6, 3], lowerCAmelCase__="bottleneck", lowerCAmelCase__="relu", lowerCAmelCase__=False, lowerCAmelCase__=None, lowerCAmelCase__=None, **lowerCAmelCase__, ) -> Dict:
super().__init__(**lowerCAmelCase__)
if layer_type not in self.layer_types:
raise ValueError(f'layer_type={layer_type} is not one of {",".join(self.layer_types)}')
snake_case_ = num_channels
snake_case_ = embedding_size
snake_case_ = hidden_sizes
snake_case_ = depths
snake_case_ = layer_type
snake_case_ = hidden_act
snake_case_ = downsample_in_first_stage
snake_case_ = ['stem'] + [f'stage{idx}' for idx in range(1, len(lowerCAmelCase__) + 1)]
snake_case_ , snake_case_ = get_aligned_output_features_output_indices(
out_features=lowerCAmelCase__, out_indices=lowerCAmelCase__, stage_names=self.stage_names)
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = version.parse("1.11" )
@property
def a_ ( self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
@property
def a_ ( self) -> float:
return 1e-3
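
# Minimal usage sketch (an illustration, not part of the original file):
#     config = ResNetConfig(depths=[3, 4, 6, 3], layer_type="bottleneck", out_features=["stage4"])
#     # config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]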
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
class UpperCamelCase ( lowerCAmelCase__ ):
def __init__( self, lowerCAmelCase__) -> Dict:
super().__init__()
snake_case_ = nn.ModuleList(lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = False, lowerCAmelCase__ = True, ) -> Union[ControlNetOutput, Tuple]:
for i, (image, scale, controlnet) in enumerate(zip(lowerCAmelCase__, lowerCAmelCase__, self.nets)):
snake_case_ , snake_case_ = controlnet(
lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, )
# merge samples
if i == 0:
snake_case_ , snake_case_ = down_samples, mid_sample
else:
snake_case_ = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(lowerCAmelCase__, lowerCAmelCase__)
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = True, lowerCAmelCase__ = None, lowerCAmelCase__ = False, lowerCAmelCase__ = None, ) -> str:
snake_case_ = 0
snake_case_ = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
lowerCAmelCase__, is_main_process=lowerCAmelCase__, save_function=lowerCAmelCase__, safe_serialization=lowerCAmelCase__, variant=lowerCAmelCase__, )
idx += 1
snake_case_ = model_path_to_save + f'_{idx}'
@classmethod
def a_ ( cls, lowerCAmelCase__, **lowerCAmelCase__) -> Dict:
snake_case_ = 0
snake_case_ = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
snake_case_ = pretrained_model_path
while os.path.isdir(lowerCAmelCase__):
snake_case_ = ControlNetModel.from_pretrained(lowerCAmelCase__, **lowerCAmelCase__)
controlnets.append(lowerCAmelCase__)
idx += 1
snake_case_ = pretrained_model_path + f'_{idx}'
logger.info(f'{len(lowerCAmelCase__)} controlnets loaded from {pretrained_model_path}.')
if len(lowerCAmelCase__) == 0:
raise ValueError(
f'No ControlNets found under {os.path.dirname(lowerCAmelCase__)}. Expected at least {pretrained_model_path + "_0"}.')
return cls(lowerCAmelCase__)
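# Hedged usage sketch for the wrapper above (upstream it is diffusers'
# `MultiControlNetModel`; the save/load methods appear as `a_` in this
# obfuscated listing, so the calls stay commented). Checkpoint ids are
# placeholders, not real repositories.
# nets = [ControlNetModel.from_pretrained(p) for p in ('user/cn-canny', 'user/cn-depth')]
# multi = MultiControlNetModel(nets)
# multi.save_pretrained('./mydirectory/controlnet')   # writes ./mydirectory/controlnet and ./mydirectory/controlnet_1
# restored = MultiControlNetModel.from_pretrained('./mydirectory/controlnet')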
| 368
|
"""simple docstring"""
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCamelCase = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = DebertaVaTokenizer
SCREAMING_SNAKE_CASE_ = DebertaVaTokenizerFast
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = True
def a_ ( self) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, unk_token='<unk>')
tokenizer.save_pretrained(self.tmpdirname)
def a_ ( self, lowerCAmelCase__) -> Any:
snake_case_ = 'this is a test'
snake_case_ = 'this is a test'
return input_text, output_text
def a_ ( self) -> Optional[int]:
snake_case_ = '<pad>'
snake_case_ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__), lowerCAmelCase__)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__), lowerCAmelCase__)
def a_ ( self) -> Tuple:
snake_case_ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0], '<pad>')
self.assertEqual(vocab_keys[1], '<unk>')
self.assertEqual(vocab_keys[-1], '[PAD]')
self.assertEqual(len(lowerCAmelCase__), 3_0001)
def a_ ( self) -> Dict:
self.assertEqual(self.get_tokenizer().vocab_size, 3_0000)
def a_ ( self) -> List[str]:
# fmt: off
snake_case_ = ' \tHeLLo!how \n Are yoU? '
snake_case_ = ['▁hello', '!', 'how', '▁are', '▁you', '?']
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.')
def a_ ( self) -> str:
pass
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.')
def a_ ( self) -> List[Any]:
pass
def a_ ( self) -> str:
# fmt: off
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> List[Any]:
# fmt: off
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Dict:
# fmt: off
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Tuple:
# fmt: off
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Any:
# fmt: off
snake_case_ = ' \tHeLLo!how \n Are yoU? '
snake_case_ = ['▁', '<unk>', 'e', '<unk>', 'o', '!', 'how', '▁', '<unk>', 're', '▁yo', '<unk>', '?']
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Dict:
snake_case_ = self.get_tokenizer()
snake_case_ = self.get_rust_tokenizer()
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
snake_case_ = rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = self.get_rust_tokenizer()
snake_case_ = tokenizer.encode(lowerCAmelCase__)
snake_case_ = rust_tokenizer.encode(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> int:
snake_case_ = 'This is a test'
snake_case_ = [13, 1, 4398, 25, 21, 1289]
snake_case_ = ['▁', 'T', 'his', '▁is', '▁a', '▁test']
snake_case_ = ['▁', '<unk>', 'his', '▁is', '▁a', '▁test']
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, keep_accents=lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, keep_accents=lowerCAmelCase__)
snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
# fmt: off
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
snake_case_ = ['▁', 'I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.', ]
snake_case_ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Tuple:
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__)
snake_case_ = tokenizer.encode('sequence builders')
snake_case_ = tokenizer.encode('multi-sequence build')
snake_case_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__)
snake_case_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__, lowerCAmelCase__)
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], lowerCAmelCase__)
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id], lowerCAmelCase__, )
@slow
def a_ ( self) -> Union[str, Any]:
# fmt: off
snake_case_ = {'input_ids': [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__, model_name='microsoft/deberta-v2-xlarge', revision='ad6e42c1532ddf3a15c39246b63f5559d558b670', )
| 312
| 0
|
"""simple docstring"""
def UpperCAmelCase ( UpperCAmelCase ) -> bool:
snake_case_ = round(n ** (1 / 3))  # round so float error in the cube root (27 ** (1/3) == 3.0000000000000004) cannot break the exact check
return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
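# A float-free alternative sketch (helper name is ours): binary-search the
# integer cube root so the check cannot be affected by floating-point
# rounding at all, even for very large n.
def perfect_cube_exact(n: int) -> bool:
    if n < 0:
        return False
    lo, hi = 0, max(1, n)
    while lo <= hi:
        mid = (lo + hi) // 2
        cube = mid ** 3
        if cube == n:
            return True
        if cube < n:
            lo = mid + 1
        else:
            hi = mid - 1
    return False
print(perfect_cube_exact(27))  # True
print(perfect_cube_exact(4))   # False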
| 369
|
"""simple docstring"""
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {}
__UpperCamelCase = {}
__UpperCamelCase = {}
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , ) -> Optional[Any]:
snake_case_ = aliases if aliases is not None else []
if format_type in _FORMAT_TYPES:
logger.warning(
f'Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})' )
snake_case_ = formatter_cls
for alias in set(aliases + [format_type] ):
if alias in _FORMAT_TYPES_ALIASES:
logger.warning(
f'Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})' )
snake_case_ = format_type
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None ) -> Union[str, Any]:
snake_case_ = aliases if aliases is not None else []
for alias in set(aliases + [format_type] ):
snake_case_ = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['''python'''])
_register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow'''])
_register_formatter(NumpyFormatter, '''numpy''', aliases=['''np'''])
_register_formatter(PandasFormatter, '''pandas''', aliases=['''pd'''])
_register_formatter(CustomFormatter, '''custom''')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch'''])
else:
__UpperCamelCase = ValueError('''PyTorch needs to be installed to be able to return PyTorch tensors.''')
_register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch'''])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf'''])
else:
__UpperCamelCase = ValueError('''Tensorflow needs to be installed to be able to return Tensorflow tensors.''')
_register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf'''])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, '''jax''', aliases=[])
else:
__UpperCamelCase = ValueError('''JAX needs to be installed to be able to return JAX arrays.''')
_register_unavailable_formatter(_jax_error, '''jax''', aliases=[])
def UpperCAmelCase ( UpperCAmelCase ) -> Optional[str]:
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def UpperCAmelCase ( UpperCAmelCase , **UpperCAmelCase ) -> Formatter:
snake_case_ = get_format_type_from_alias(UpperCAmelCase )
if format_type in _FORMAT_TYPES:
return _FORMAT_TYPES[format_type](**UpperCAmelCase )
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
f'Return type should be None or selected in {list(t for t in _FORMAT_TYPES.keys() if t is not None )}, but got \'{format_type}\'' )
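# Minimal standalone sketch of the alias-resolving registry pattern used
# above (the names below are ours, not the datasets internals): aliases map
# to a canonical name, and lookup resolves the alias before the real table.
_registry = {}
_alias_table = {}
def register(cls, name, aliases=()):
    _registry[name] = cls
    for alias in set(aliases) | {name}:
        _alias_table[alias] = name
def lookup(name_or_alias):
    name = _alias_table.get(name_or_alias, name_or_alias)
    if name not in _registry:
        raise ValueError(f'unknown format {name_or_alias!r}')
    return _registry[name]
register(list, 'python', aliases=['py'])
print(lookup('py'))  # <class 'list'>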
| 312
| 0
|
"""simple docstring"""
from __future__ import annotations
import math
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> list:
if len(UpperCAmelCase ) != 2 or len(a[0] ) != 2 or len(UpperCAmelCase ) != 2 or len(b[0] ) != 2:
raise Exception('Matrices are not 2x2' )
snake_case_ = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Tuple:
return [
[matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(UpperCAmelCase ) )
]
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
return [
[matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(UpperCAmelCase ) )
]
def UpperCAmelCase ( UpperCAmelCase ) -> tuple[list, list, list, list]:
if len(UpperCAmelCase ) % 2 != 0 or len(a[0] ) % 2 != 0:
raise Exception('Odd matrices are not supported!' )
snake_case_ = len(UpperCAmelCase )
snake_case_ = matrix_length // 2
snake_case_ = [[a[i][j] for j in range(UpperCAmelCase , UpperCAmelCase )] for i in range(UpperCAmelCase )]
snake_case_ = [
[a[i][j] for j in range(UpperCAmelCase , UpperCAmelCase )] for i in range(UpperCAmelCase , UpperCAmelCase )
]
snake_case_ = [[a[i][j] for j in range(UpperCAmelCase )] for i in range(UpperCAmelCase )]
snake_case_ = [[a[i][j] for j in range(UpperCAmelCase )] for i in range(UpperCAmelCase , UpperCAmelCase )]
return top_left, top_right, bot_left, bot_right
def UpperCAmelCase ( UpperCAmelCase ) -> tuple[int, int]:
return len(UpperCAmelCase ), len(matrix[0] )
def UpperCAmelCase ( UpperCAmelCase ) -> None:
print('\n'.join(str(line) for line in matrix))  # one row per line, not the whole matrix repeatedly
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> list:
if matrix_dimensions(UpperCAmelCase ) == (2, 2):
return default_matrix_multiplication(UpperCAmelCase , UpperCAmelCase )
snake_case_ , snake_case_ , snake_case_ , snake_case_ = split_matrix(UpperCAmelCase )
snake_case_ , snake_case_ , snake_case_ , snake_case_ = split_matrix(UpperCAmelCase )
snake_case_ = actual_strassen(UpperCAmelCase , matrix_subtraction(UpperCAmelCase , UpperCAmelCase ) )
snake_case_ = actual_strassen(matrix_addition(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
snake_case_ = actual_strassen(matrix_addition(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
snake_case_ = actual_strassen(UpperCAmelCase , matrix_subtraction(UpperCAmelCase , UpperCAmelCase ) )
snake_case_ = actual_strassen(matrix_addition(UpperCAmelCase , UpperCAmelCase ) , matrix_addition(UpperCAmelCase , UpperCAmelCase ) )
snake_case_ = actual_strassen(matrix_subtraction(UpperCAmelCase , UpperCAmelCase ) , matrix_addition(UpperCAmelCase , UpperCAmelCase ) )
snake_case_ = actual_strassen(matrix_subtraction(UpperCAmelCase , UpperCAmelCase ) , matrix_addition(UpperCAmelCase , UpperCAmelCase ) )
snake_case_ = matrix_addition(matrix_subtraction(matrix_addition(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase ) , UpperCAmelCase )
snake_case_ = matrix_addition(UpperCAmelCase , UpperCAmelCase )
snake_case_ = matrix_addition(UpperCAmelCase , UpperCAmelCase )
snake_case_ = matrix_subtraction(matrix_subtraction(matrix_addition(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase ) , UpperCAmelCase )
# construct the new matrix from our 4 quadrants
snake_case_ = []
for i in range(len(UpperCAmelCase ) ):
new_matrix.append(top_left[i] + top_right[i] )
for i in range(len(UpperCAmelCase ) ):
new_matrix.append(bot_left[i] + bot_right[i] )
return new_matrix
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> list:
if matrix_dimensions(UpperCAmelCase )[1] != matrix_dimensions(UpperCAmelCase )[0]:
snake_case_ = (
'Unable to multiply these matrices, please check the dimensions.\n'
f'Matrix A: {matrixa}\n'
f'Matrix B: {matrixa}'
)
raise Exception(UpperCAmelCase )
snake_case_ = matrix_dimensions(UpperCAmelCase )
snake_case_ = matrix_dimensions(UpperCAmelCase )
if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]:
return [matrixa, matrixa]
snake_case_ = max(*UpperCAmelCase , *UpperCAmelCase )
snake_case_ = int(math.pow(2 , math.ceil(math.loga(UpperCAmelCase ) ) ) )
snake_case_ = matrixa
snake_case_ = matrixa
# Adding zeros to the matrices so that the arrays dimensions are the same and also
# power of 2
for i in range(0 , UpperCAmelCase ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , UpperCAmelCase ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
if i < dimensiona[0]:
for _ in range(dimensiona[1] , UpperCAmelCase ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
snake_case_ = actual_strassen(UpperCAmelCase , UpperCAmelCase )
# Removing the additional zeros
for i in range(0 , UpperCAmelCase ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , UpperCAmelCase ):
final_matrix[i].pop()
else:
final_matrix.pop()
return final_matrix
if __name__ == "__main__":
__UpperCamelCase = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
__UpperCamelCase = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
print(strassen(matrixa, matrixa))
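# Sanity sketch (ours): check the divide-and-conquer result against a naive
# O(n^3) product on a small non-square case. The expected value is computed
# first because `strassen` pads its argument lists in place.
def naive_multiply(a, b):
    return [
        [sum(a[i][k] * b[k][j] for k in range(len(b))) for j in range(len(b[0]))]
        for i in range(len(a))
    ]
sample_a = [[1, 2], [3, 4], [5, 6]]
sample_b = [[7, 8], [9, 10]]
expected = naive_multiply(sample_a, sample_b)
print(strassen(sample_a, sample_b) == expected)  # True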
| 370
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all MVP models at https://huggingface.co/models?filter=mvp
__UpperCamelCase = {
'''vocab_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''',
},
'''added_tokens.json''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''',
},
'''merges_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''',
},
}
__UpperCamelCase = {
'''RUCAIBox/mvp''': 1024,
}
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = ["input_ids", "attention_mask"]
SCREAMING_SNAKE_CASE_ = MvpTokenizer
def __init__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__="replace", lowerCAmelCase__="<s>", lowerCAmelCase__="</s>", lowerCAmelCase__="</s>", lowerCAmelCase__="<s>", lowerCAmelCase__="<unk>", lowerCAmelCase__="<pad>", lowerCAmelCase__="<mask>", lowerCAmelCase__=False, lowerCAmelCase__=True, **lowerCAmelCase__, ) -> Union[str, Any]:
super().__init__(
lowerCAmelCase__, lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, errors=lowerCAmelCase__, bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, unk_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, add_prefix_space=lowerCAmelCase__, trim_offsets=lowerCAmelCase__, **lowerCAmelCase__, )
snake_case_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get('add_prefix_space', lowerCAmelCase__) != add_prefix_space:
snake_case_ = getattr(lowerCAmelCase__, pre_tok_state.pop('type'))
snake_case_ = add_prefix_space
snake_case_ = pre_tok_class(**lowerCAmelCase__)
snake_case_ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
snake_case_ = 'post_processor'
snake_case_ = getattr(self.backend_tokenizer, lowerCAmelCase__, lowerCAmelCase__)
if tokenizer_component_instance:
snake_case_ = json.loads(tokenizer_component_instance.__getstate__())
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
snake_case_ = tuple(state['sep'])
if "cls" in state:
snake_case_ = tuple(state['cls'])
snake_case_ = False
if state.get('add_prefix_space', lowerCAmelCase__) != add_prefix_space:
snake_case_ = add_prefix_space
snake_case_ = True
if state.get('trim_offsets', lowerCAmelCase__) != trim_offsets:
snake_case_ = trim_offsets
snake_case_ = True
if changes_to_apply:
snake_case_ = getattr(lowerCAmelCase__, state.pop('type'))
snake_case_ = component_class(**lowerCAmelCase__)
setattr(self.backend_tokenizer, lowerCAmelCase__, lowerCAmelCase__)
@property
def a_ ( self) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.')
return None
return str(self._mask_token)
@mask_token.setter
def a_ ( self, lowerCAmelCase__) -> Any:
snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else value
snake_case_ = value
def a_ ( self, *lowerCAmelCase__, **lowerCAmelCase__) -> BatchEncoding:
snake_case_ = kwargs.get('is_split_into_words', lowerCAmelCase__)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
'to use it with pretokenized inputs.')
return super()._batch_encode_plus(*lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, *lowerCAmelCase__, **lowerCAmelCase__) -> BatchEncoding:
snake_case_ = kwargs.get('is_split_into_words', lowerCAmelCase__)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
'to use it with pretokenized inputs.')
return super()._encode_plus(*lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]:
snake_case_ = self._tokenizer.model.save(lowerCAmelCase__, name=lowerCAmelCase__)
return tuple(lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=None) -> str:
snake_case_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]:
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
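# Symbolic walk-through (ours) of the two layouts returned by
# build_inputs_with_special_tokens above; the ids are placeholders
# (bos=<s>=0, eos=</s>=2), not the MVP vocabulary.
bos, eos = 0, 2
seq_a, seq_b = [11, 12], [21]
single = [bos] + seq_a + [eos]            # <s> A </s>
pair = single + [eos] + seq_b + [eos]     # <s> A </s> </s> B </s>
assert pair == [0, 11, 12, 2, 2, 21, 2]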
| 312
| 0
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = CanineTokenizer
SCREAMING_SNAKE_CASE_ = False
def a_ ( self) -> Dict:
super().setUp()
snake_case_ = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname)
@cached_property
def a_ ( self) -> Optional[Any]:
return CanineTokenizer.from_pretrained('google/canine-s')
def a_ ( self, **lowerCAmelCase__) -> CanineTokenizer:
snake_case_ = self.tokenizer_class.from_pretrained(self.tmpdirname, **lowerCAmelCase__)
snake_case_ = 1024
return tokenizer
@require_torch
def a_ ( self) -> int:
snake_case_ = self.canine_tokenizer
snake_case_ = ['Life is like a box of chocolates.', 'You never know what you\'re gonna get.']
# fmt: off
snake_case_ = [5_7344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 5_7345, 0, 0, 0, 0]
# fmt: on
snake_case_ = tokenizer(lowerCAmelCase__, padding=lowerCAmelCase__, return_tensors='pt')
self.assertIsInstance(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = list(batch.input_ids.numpy()[0])
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
self.assertEqual((2, 39), batch.input_ids.shape)
self.assertEqual((2, 39), batch.attention_mask.shape)
@require_torch
def a_ ( self) -> Any:
snake_case_ = self.canine_tokenizer
snake_case_ = ['Once there was a man.', 'He wrote a test in HuggingFace Transformers.']
snake_case_ = tokenizer(lowerCAmelCase__, padding=lowerCAmelCase__, return_tensors='pt')
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn('input_ids', lowerCAmelCase__)
self.assertIn('attention_mask', lowerCAmelCase__)
self.assertIn('token_type_ids', lowerCAmelCase__)
@require_torch
def a_ ( self) -> List[str]:
snake_case_ = self.canine_tokenizer
snake_case_ = [
'What\'s the weather?',
'It\'s about 25 degrees.',
]
snake_case_ = tokenizer(
text_target=lowerCAmelCase__, max_length=32, padding='max_length', truncation=lowerCAmelCase__, return_tensors='pt')
self.assertEqual(32, targets['input_ids'].shape[1])
def a_ ( self) -> Optional[Any]:
# safety check on max_len default value so we are sure the test works
snake_case_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}'):
self.assertNotEqual(tokenizer.model_max_length, 42)
# Now let's start the test
snake_case_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}'):
# Isolate this from the other tests because we save additional tokens/etc
snake_case_ = tempfile.mkdtemp()
snake_case_ = ' He is very happy, UNwant\u00E9d,running'
snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
tokenizer.save_pretrained(lowerCAmelCase__)
snake_case_ = tokenizer.__class__.from_pretrained(lowerCAmelCase__)
snake_case_ = after_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
shutil.rmtree(lowerCAmelCase__)
snake_case_ = self.get_tokenizers(model_max_length=42)
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}'):
# Isolate this from the other tests because we save additional tokens/etc
snake_case_ = tempfile.mkdtemp()
snake_case_ = ' He is very happy, UNwant\u00E9d,running'
snake_case_ = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
snake_case_ = chr(0xe_007)
additional_special_tokens.append(lowerCAmelCase__)
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens})
snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
tokenizer.save_pretrained(lowerCAmelCase__)
snake_case_ = tokenizer.__class__.from_pretrained(lowerCAmelCase__)
snake_case_ = after_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
self.assertIn(lowerCAmelCase__, after_tokenizer.additional_special_tokens)
self.assertEqual(after_tokenizer.model_max_length, 42)
snake_case_ = tokenizer.__class__.from_pretrained(lowerCAmelCase__, model_max_length=43)
self.assertEqual(tokenizer.model_max_length, 43)
shutil.rmtree(lowerCAmelCase__)
def a_ ( self) -> str:
snake_case_ = self.get_tokenizers(do_lower_case=lowerCAmelCase__)
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}'):
snake_case_ , snake_case_ = self.get_clean_sequence(lowerCAmelCase__)
# a special token for Canine can be defined as follows:
snake_case_ = 0xe_005
snake_case_ = chr(lowerCAmelCase__)
tokenizer.add_special_tokens({'cls_token': special_token})
snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertEqual(len(lowerCAmelCase__), 1)
snake_case_ = tokenizer.decode(ids + encoded_special_token, clean_up_tokenization_spaces=lowerCAmelCase__)
snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertEqual(lowerCAmelCase__, input_encoded + special_token_id)
snake_case_ = tokenizer.decode(lowerCAmelCase__, skip_special_tokens=lowerCAmelCase__)
self.assertTrue(special_token not in decoded)
def a_ ( self) -> Union[str, Any]:
snake_case_ = self.get_tokenizers(do_lower_case=lowerCAmelCase__)
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}'):
snake_case_ = chr(0xe_005)
snake_case_ = chr(0xe_006)
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1], special_tokens=lowerCAmelCase__)
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({'additional_special_tokens': [SPECIAL_TOKEN_2]})
snake_case_ = tokenizer.tokenize(lowerCAmelCase__)
snake_case_ = tokenizer.tokenize(lowerCAmelCase__)
self.assertEqual(len(lowerCAmelCase__), 1)
self.assertEqual(len(lowerCAmelCase__), 1)
self.assertEqual(token_a[0], lowerCAmelCase__)
self.assertEqual(token_a[0], lowerCAmelCase__)
@require_tokenizers
def a_ ( self) -> Tuple:
snake_case_ = self.get_tokenizers(do_lower_case=lowerCAmelCase__)
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}'):
# a special token for Canine can be defined as follows:
snake_case_ = 0xe_006
snake_case_ = chr(lowerCAmelCase__)
snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__)
tokenizer.add_special_tokens({'additional_special_tokens': [new_token]})
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(lowerCAmelCase__)
tokenizer.from_pretrained(lowerCAmelCase__)
def a_ ( self) -> int:
snake_case_ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCAmelCase__)
with open(os.path.join(lowerCAmelCase__, 'special_tokens_map.json'), encoding='utf-8') as json_file:
snake_case_ = json.load(lowerCAmelCase__)
with open(os.path.join(lowerCAmelCase__, 'tokenizer_config.json'), encoding='utf-8') as json_file:
snake_case_ = json.load(lowerCAmelCase__)
# a special token for Canine can be defined as follows:
snake_case_ = 0xe_006
snake_case_ = chr(lowerCAmelCase__)
snake_case_ = [new_token_a]
snake_case_ = [new_token_a]
with open(os.path.join(lowerCAmelCase__, 'special_tokens_map.json'), 'w', encoding='utf-8') as outfile:
json.dump(lowerCAmelCase__, lowerCAmelCase__)
with open(os.path.join(lowerCAmelCase__, 'tokenizer_config.json'), 'w', encoding='utf-8') as outfile:
json.dump(lowerCAmelCase__, lowerCAmelCase__)
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
snake_case_ = tokenizer_class.from_pretrained(lowerCAmelCase__, extra_ids=0)
self.assertIn(lowerCAmelCase__, tokenizer_without_change_in_init.additional_special_tokens)
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a], tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a])), )
snake_case_ = 0xe_007
snake_case_ = chr(lowerCAmelCase__)
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
snake_case_ = [AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__)]
snake_case_ = tokenizer_class.from_pretrained(
lowerCAmelCase__, additional_special_tokens=lowerCAmelCase__, extra_ids=0)
self.assertIn(lowerCAmelCase__, tokenizer.additional_special_tokens)
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a], tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a])))
@require_tokenizers
def a_ ( self) -> Union[str, Any]:
snake_case_ = self.get_tokenizers(do_lower_case=lowerCAmelCase__)
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}'):
snake_case_ = 'hello world'
if self.space_between_special_tokens:
snake_case_ = '[CLS] hello world [SEP]'
else:
snake_case_ = input
snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
snake_case_ = tokenizer.decode(lowerCAmelCase__, spaces_between_special_tokens=self.space_between_special_tokens)
self.assertIn(lowerCAmelCase__, [output, output.lower()])
def a_ ( self) -> Dict:
snake_case_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}'):
snake_case_ = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
snake_case_ = 'a'
snake_case_ = ord(lowerCAmelCase__)
for attr in attributes_list:
setattr(lowerCAmelCase__, attr + '_id', lowerCAmelCase__)
self.assertEqual(getattr(lowerCAmelCase__, lowerCAmelCase__), lowerCAmelCase__)
self.assertEqual(getattr(lowerCAmelCase__, attr + '_id'), lowerCAmelCase__)
setattr(lowerCAmelCase__, attr + '_id', lowerCAmelCase__)
self.assertEqual(getattr(lowerCAmelCase__, lowerCAmelCase__), lowerCAmelCase__)
self.assertEqual(getattr(lowerCAmelCase__, attr + '_id'), lowerCAmelCase__)
setattr(lowerCAmelCase__, 'additional_special_tokens_ids', [])
self.assertListEqual(getattr(lowerCAmelCase__, 'additional_special_tokens'), [])
self.assertListEqual(getattr(lowerCAmelCase__, 'additional_special_tokens_ids'), [])
snake_case_ = 0xe_006
snake_case_ = chr(lowerCAmelCase__)
setattr(lowerCAmelCase__, 'additional_special_tokens_ids', [additional_special_token_id])
self.assertListEqual(getattr(lowerCAmelCase__, 'additional_special_tokens'), [additional_special_token])
self.assertListEqual(getattr(lowerCAmelCase__, 'additional_special_tokens_ids'), [additional_special_token_id])
def a_ ( self) -> List[Any]:
pass
def a_ ( self) -> List[Any]:
pass
def a_ ( self) -> Optional[int]:
pass
def a_ ( self) -> Tuple:
pass
def a_ ( self) -> Tuple:
pass
def a_ ( self) -> int:
pass
def a_ ( self) -> Optional[int]:
pass
def a_ ( self) -> int:
pass
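# Illustrative helper (ours, never called by the tests) recapping the two
# registration paths exercised above: `add_tokens(..., special_tokens=True)`
# does not touch `additional_special_tokens`, while `add_special_tokens`
# records the token there. The private-use code points are arbitrary.
def _special_token_paths_demo():
    tok = CanineTokenizer()
    tok.add_tokens([chr(0xE005)], special_tokens=True)
    tok.add_special_tokens({'additional_special_tokens': [chr(0xE006)]})
    assert chr(0xE006) in tok.additional_special_tokens
    assert chr(0xE005) not in tok.additional_special_tokens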
| 371
|
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
def a_ ( self, lowerCAmelCase__=0) -> List[Any]:
snake_case_ = floats_tensor((1, 3, 128, 128), rng=random.Random(lowerCAmelCase__))
snake_case_ = np.random.RandomState(lowerCAmelCase__)
snake_case_ = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'strength': 0.75,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def a_ ( self) -> Optional[Any]:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
assert np.abs(image_slice - expected_slice).max() < 1e-1
def a_ ( self) -> List[str]:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def a_ ( self) -> str:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
# warmup pass to apply optimizations
snake_case_ = pipe(**self.get_dummy_inputs())
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def a_ ( self) -> int:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def a_ ( self) -> Dict:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def a_ ( self) -> Dict:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
@property
def a_ ( self) -> int:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def a_ ( self) -> str:
snake_case_ = ort.SessionOptions()
snake_case_ = False
return options
def a_ ( self) -> Any:
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg')
snake_case_ = init_image.resize((768, 512))
# using the PNDM scheduler by default
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='onnx', safety_checker=lowerCAmelCase__, feature_extractor=lowerCAmelCase__, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = 'A fantasy landscape, trending on artstation'
snake_case_ = np.random.RandomState(0)
snake_case_ = pipe(
prompt=lowerCAmelCase__, image=lowerCAmelCase__, strength=0.75, guidance_scale=7.5, num_inference_steps=10, generator=lowerCAmelCase__, output_type='np', )
snake_case_ = output.images
snake_case_ = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
snake_case_ = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
def a_ ( self) -> List[Any]:
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg')
snake_case_ = init_image.resize((768, 512))
snake_case_ = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5', subfolder='scheduler', revision='onnx')
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5', revision='onnx', scheduler=lowerCAmelCase__, safety_checker=lowerCAmelCase__, feature_extractor=lowerCAmelCase__, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = 'A fantasy landscape, trending on artstation'
snake_case_ = np.random.RandomState(0)
snake_case_ = pipe(
prompt=lowerCAmelCase__, image=lowerCAmelCase__, strength=0.75, guidance_scale=7.5, num_inference_steps=20, generator=lowerCAmelCase__, output_type='np', )
snake_case_ = output.images
snake_case_ = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
snake_case_ = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
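# The scheduler-swap pattern the cases above repeat, in isolation: any
# compatible scheduler can be rebuilt from the pipeline's current config.
# The checkpoint id is a placeholder and real ONNX weights are needed, so
# this stays a commented sketch.
# pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
#     'some/onnx-checkpoint', provider='CPUExecutionProvider')
# pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)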
| 312
| 0
|
"""simple docstring"""
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
UpperCAmelCase : List[Any] = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase=None , __lowerCAmelCase=None ) -> Union[str, Any]:
'''simple docstring'''
return field(default_factory=lambda: default , metadata=__lowerCAmelCase )
@dataclass
class SCREAMING_SNAKE_CASE__ :
lowercase__ = list_field(
default=[] , metadata={
"help": (
"Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
" of all available models"
)
} , )
lowercase__ = list_field(
default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} )
lowercase__ = list_field(
default=[8, 32, 128, 512] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , )
lowercase__ = field(
default=__UpperCAmelCase , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , )
lowercase__ = field(
default=__UpperCAmelCase , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , )
lowercase__ = field(
default=__UpperCAmelCase , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} )
lowercase__ = field(default=__UpperCAmelCase , metadata={"help": "Use FP16 to accelerate inference."} )
lowercase__ = field(default=__UpperCAmelCase , metadata={"help": "Benchmark training of model"} )
lowercase__ = field(default=__UpperCAmelCase , metadata={"help": "Verbose memory tracing"} )
lowercase__ = field(
default=__UpperCAmelCase , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , )
lowercase__ = field(
default=__UpperCAmelCase , metadata={
"help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
} , )
lowercase__ = field(default=__UpperCAmelCase , metadata={"help": "Trace memory line by line"} )
lowercase__ = field(default=__UpperCAmelCase , metadata={"help": "Save result to a CSV file"} )
lowercase__ = field(default=__UpperCAmelCase , metadata={"help": "Save all print statements in a log file"} )
lowercase__ = field(default=__UpperCAmelCase , metadata={"help": "Whether to print environment information"} )
lowercase__ = field(
default=__UpperCAmelCase , metadata={
"help": (
"Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
" multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
" for debugging / testing and on TPU."
)
} , )
lowercase__ = field(
default=F"""inference_time_{round(time() )}.csv""" , metadata={"help": "CSV filename used if saving time results to csv."} , )
lowercase__ = field(
default=F"""inference_memory_{round(time() )}.csv""" , metadata={"help": "CSV filename used if saving memory results to csv."} , )
lowercase__ = field(
default=F"""train_time_{round(time() )}.csv""" , metadata={"help": "CSV filename used if saving time results to csv for training."} , )
lowercase__ = field(
default=F"""train_memory_{round(time() )}.csv""" , metadata={"help": "CSV filename used if saving memory results to csv for training."} , )
lowercase__ = field(
default=F"""env_info_{round(time() )}.csv""" , metadata={"help": "CSV filename used if saving environment information."} , )
lowercase__ = field(
default=F"""log_{round(time() )}.csv""" , metadata={"help": "Log filename used if print statements are saved in log."} , )
lowercase__ = field(default=3 , metadata={"help": "Times an experiment will be run."} )
lowercase__ = field(
default=__UpperCAmelCase , metadata={
"help": (
"Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
" model weights."
)
} , )
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
warnings.warn(
F'''The class {self.__class__} is deprecated. Hugging Face Benchmarking utils'''
""" are deprecated in general and it is advised to use external Benchmarking libraries """
""" to benchmark Transformer models.""" , lowerCAmelCase_ , )
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
return json.dumps(dataclasses.asdict(self) , indent=2)
@property
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
if len(self.models) <= 0:
raise ValueError(
"""Please make sure you provide at least one model name / model identifier, *e.g.* `--models"""
""" bert-base-cased` or `args.models = ['bert-base-cased'].""")
return self.models
@property
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
if not self.multi_process:
return False
elif self.is_tpu:
logger.info("""Multiprocessing is currently not possible on TPU.""")
return False
else:
return True
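# Minimal sketch (ours) of the `list_field` pattern defined at the top of
# this file: dataclass fields need a factory for mutable defaults, so the
# helper wraps the default list in a lambda. Names below are illustrative.
def demo_list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class _Demo:
    sizes: List[int] = demo_list_field(default=[8, 32])
assert _Demo().sizes == [8, 32]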
| 313
|
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCAmelCase : Optional[Any] = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class SCREAMING_SNAKE_CASE__ :
lowercase__ = PegasusConfig
lowercase__ = {}
lowercase__ = "gelu"
def __init__( self : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any]=1_3 , lowerCAmelCase_ : Any=7 , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : Tuple=False , lowerCAmelCase_ : str=9_9 , lowerCAmelCase_ : Tuple=3_2 , lowerCAmelCase_ : Dict=5 , lowerCAmelCase_ : Union[str, Any]=4 , lowerCAmelCase_ : Dict=3_7 , lowerCAmelCase_ : Any=0.1 , lowerCAmelCase_ : Any=0.1 , lowerCAmelCase_ : Optional[int]=2_0 , lowerCAmelCase_ : Tuple=2 , lowerCAmelCase_ : List[str]=1 , lowerCAmelCase_ : Optional[Any]=0 , ):
"""simple docstring"""
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = seq_length
lowercase_ = is_training
lowercase_ = use_labels
lowercase_ = vocab_size
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = max_position_embeddings
lowercase_ = eos_token_id
lowercase_ = pad_token_id
lowercase_ = bos_token_id
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
lowercase_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size).clip(3 , self.vocab_size)
lowercase_ = np.expand_dims(np.array([self.eos_token_id] * self.batch_size) , 1)
lowercase_ = np.concatenate([input_ids, eos_tensor] , axis=1)
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
lowercase_ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
lowercase_ = prepare_pegasus_inputs_dict(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
return config, inputs_dict
def _UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[Any]):
"""simple docstring"""
lowercase_ = 2_0
lowercase_ = model_class_name(lowerCAmelCase_)
lowercase_ = model.encode(inputs_dict["""input_ids"""])
lowercase_ , lowercase_ = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
lowercase_ = model.init_cache(decoder_input_ids.shape[0] , lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""")
lowercase_ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowercase_ = model.decode(
decoder_input_ids[:, :-1] , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ , decoder_position_ids=lowerCAmelCase_ , )
lowercase_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""")
outputs_cache_next = model.decode(
decoder_input_ids[:, -1:] , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowerCAmelCase_ , )
outputs = model.decode(decoder_input_ids , encoder_outputs)
diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''')
def _UpperCAmelCase ( self : Optional[int] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict):
"""simple docstring"""
max_decoder_length = 2_0
model = model_class_name(lowerCAmelCase_)
encoder_outputs = model.encode(inputs_dict["""input_ids"""])
decoder_input_ids , decoder_attention_mask = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
decoder_attention_mask_cache = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
] , axis=-1 , )
lowercase_ = model.init_cache(decoder_input_ids.shape[0] , lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
outputs_cache = model.decode(
decoder_input_ids[:, :-1] , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ , decoder_position_ids=lowerCAmelCase_ , )
lowercase_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""")
outputs_cache_next = model.decode(
decoder_input_ids[:, -1:] , lowerCAmelCase_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowerCAmelCase_ , decoder_position_ids=lowerCAmelCase_ , )
outputs = model.decode(decoder_input_ids , encoder_outputs , decoder_attention_mask=decoder_attention_mask_cache)
diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''')
def prepare_pegasus_inputs_dict(config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , ):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids , config.pad_token_id ).astype(np.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape , dtype=np.int8 ),
                np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.int8 ),
            ] , axis=-1 , )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
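# A minimal, self-contained sketch (added for illustration; not part of the original test
# module) of the mask logic in `prepare_pegasus_inputs_dict` above: pad positions are
# masked out, and the decoder mask always keeps the first (decoder-start) position. The
# pad id of 0 and the token ids below are assumptions made purely for this example.
def _example_decoder_attention_mask() -> np.ndarray:
    decoder_input_ids = np.array([[5, 7, 0, 0]])  # the last two positions are padding
    return np.concatenate(
        [
            np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),  # always keep the start token
            np.not_equal(decoder_input_ids[:, 1:], 0).astype(np.int8),  # mask pad (id 0) elsewhere
        ],
        axis=-1,
    )  # -> array([[1, 1, 0, 0]], dtype=int8)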
@require_flax
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , unittest.TestCase ):
lowercase__ = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
lowercase__ = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
lowercase__ = True
lowercase__ = False
lowercase__ = False
lowercase__ = False
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
lowercase_ = FlaxPegasusModelTester(self)
lowercase_ = ConfigTester(self , config_class=lowerCAmelCase_)
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
lowercase_ = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = model_class(lowerCAmelCase_)
@jax.jit
def encode_jitted(lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int]=None , **lowerCAmelCase_ : Optional[int]):
return model.encode(input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_)
with self.subTest("""JIT Enabled"""):
lowercase_ = encode_jitted(**lowerCAmelCase_).to_tuple()
with self.subTest("""JIT Disabled"""):
with jax.disable_jit():
lowercase_ = encode_jitted(**lowerCAmelCase_).to_tuple()
self.assertEqual(len(lowerCAmelCase_) , len(lowerCAmelCase_))
for jitted_output, output in zip(lowerCAmelCase_ , lowerCAmelCase_):
self.assertEqual(jitted_output.shape , output.shape)
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
lowercase_ = model_class(lowerCAmelCase_)
lowercase_ = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""])
lowercase_ = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict):
return model.decode(
decoder_input_ids=lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , encoder_outputs=lowerCAmelCase_ , )
with self.subTest("""JIT Enabled"""):
lowercase_ = decode_jitted(**lowerCAmelCase_).to_tuple()
with self.subTest("""JIT Disabled"""):
with jax.disable_jit():
lowercase_ = decode_jitted(**lowerCAmelCase_).to_tuple()
self.assertEqual(len(lowerCAmelCase_) , len(lowerCAmelCase_))
for jitted_output, output in zip(lowerCAmelCase_ , lowerCAmelCase_):
self.assertEqual(jitted_output.shape , output.shape)
@slow
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
for model_class_name in self.all_model_classes:
lowercase_ = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=lowerCAmelCase_)
lowercase_ = np.ones((1, 1))
lowercase_ = model(lowerCAmelCase_)
self.assertIsNotNone(lowerCAmelCase_)
@slow
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
lowercase_ = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""")
lowercase_ = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""")
lowercase_ = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
lowercase_ = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
lowercase_ = tokenizer(lowerCAmelCase_ , return_tensors="""np""" , truncation=lowerCAmelCase_ , max_length=5_1_2 , padding=lowerCAmelCase_)
lowercase_ = model.generate(**lowerCAmelCase_ , num_beams=2).sequences
lowercase_ = tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_)
assert tgt_text == decoded
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
lowercase__ = ViTImageProcessor if is_vision_available() else None
@property
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCAmelCase ( self : str):
"""simple docstring"""
lowercase_ = (3, 3_2, 1_2_8)
lowercase_ = tempfile.mkdtemp()
# fmt: off
lowercase_ = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
lowercase_ = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_))))
lowercase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp:
fp.write(json.dumps(lowerCAmelCase_) + """\n""")
lowercase_ = {
"""do_normalize""": False,
"""do_resize""": True,
"""image_processor_type""": """ViTImageProcessor""",
"""resample""": 3,
"""size""": {"""height""": 3_2, """width""": 1_2_8},
}
lowercase_ = os.path.join(self.tmpdirname , lowerCAmelCase_)
with open(self.image_processor_file , """w""" , encoding="""utf-8""") as fp:
json.dump(lowerCAmelCase_ , lowerCAmelCase_)
def _UpperCAmelCase ( self : List[str] , **lowerCAmelCase_ : List[str]):
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase_)
def _UpperCAmelCase ( self : int , **lowerCAmelCase_ : Tuple):
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase_)
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
shutil.rmtree(self.tmpdirname)
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
image_input = np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uint8)
image_input = Image.fromarray(np.moveaxis(image_input , 0 , -1))
return image_input
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
lowercase_ = self.get_tokenizer()
lowercase_ = self.get_image_processor()
lowercase_ = MgpstrProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_)
processor.save_pretrained(self.tmpdirname)
lowercase_ = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCAmelCase_)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab())
self.assertIsInstance(processor.char_tokenizer , lowerCAmelCase_)
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor.image_processor , lowerCAmelCase_)
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
lowercase_ = self.get_tokenizer()
lowercase_ = self.get_image_processor()
lowercase_ = MgpstrProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_)
processor.save_pretrained(self.tmpdirname)
lowercase_ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""")
lowercase_ = self.get_image_processor(do_normalize=lowerCAmelCase_ , padding_value=1.0)
lowercase_ = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowerCAmelCase_ , padding_value=1.0)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.char_tokenizer , lowerCAmelCase_)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , lowerCAmelCase_)
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = self.get_image_processor()
lowercase_ = self.get_tokenizer()
lowercase_ = MgpstrProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_)
lowercase_ = self.prepare_image_inputs()
lowercase_ = image_processor(lowerCAmelCase_ , return_tensors="""np""")
lowercase_ = processor(images=lowerCAmelCase_ , return_tensors="""np""")
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = self.get_image_processor()
lowercase_ = self.get_tokenizer()
lowercase_ = MgpstrProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_)
lowercase_ = """test"""
lowercase_ = processor(text=lowerCAmelCase_)
lowercase_ = tokenizer(lowerCAmelCase_)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def _UpperCAmelCase ( self : str):
"""simple docstring"""
lowercase_ = self.get_image_processor()
lowercase_ = self.get_tokenizer()
lowercase_ = MgpstrProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_)
lowercase_ = """test"""
lowercase_ = self.prepare_image_inputs()
lowercase_ = processor(text=lowerCAmelCase_ , images=lowerCAmelCase_)
self.assertListEqual(list(inputs.keys()) , ["""pixel_values""", """labels"""])
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase_):
processor()
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
lowercase_ = self.get_image_processor()
lowercase_ = self.get_tokenizer()
lowercase_ = MgpstrProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_)
lowercase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
lowercase_ = processor.char_decode(lowerCAmelCase_)
lowercase_ = tokenizer.batch_decode(lowerCAmelCase_)
lowercase_ = [seq.replace(""" """ , """""") for seq in decoded_tok]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_)
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
lowercase_ = self.get_image_processor()
lowercase_ = self.get_tokenizer()
lowercase_ = MgpstrProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_)
lowercase_ = None
lowercase_ = self.prepare_image_inputs()
lowercase_ = processor(text=lowerCAmelCase_ , images=lowerCAmelCase_)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ = self.get_image_processor()
lowercase_ = self.get_tokenizer()
lowercase_ = MgpstrProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_)
lowercase_ = torch.randn(1 , 2_7 , 3_8)
lowercase_ = torch.randn(1 , 2_7 , 5_0_2_5_7)
lowercase_ = torch.randn(1 , 2_7 , 3_0_5_2_2)
lowercase_ = processor.batch_decode([char_input, bpe_input, wp_input])
self.assertListEqual(list(results.keys()) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""])
"""simple docstring"""
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class SCREAMING_SNAKE_CASE__ :
lowercase__ = None
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = None
lowercase__ = None
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = True
lowercase__ = None
lowercase__ = 1
lowercase__ = None
lowercase__ = False
lowercase__ = None
lowercase__ = None
def _UpperCAmelCase ( self : int):
"""simple docstring"""
return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
UpperCAmelCase : Tuple = logging.get_logger(__name__)
UpperCAmelCase : Optional[int] = {
"deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = "perceiver"
def __init__( self : Optional[int] , lowerCAmelCase_ : List[str]=2_5_6 , lowerCAmelCase_ : Dict=1_2_8_0 , lowerCAmelCase_ : List[Any]=7_6_8 , lowerCAmelCase_ : Optional[Any]=1 , lowerCAmelCase_ : List[Any]=2_6 , lowerCAmelCase_ : Optional[Any]=8 , lowerCAmelCase_ : Tuple=8 , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : Optional[Any]="kv" , lowerCAmelCase_ : Dict=1 , lowerCAmelCase_ : Optional[Any]=1 , lowerCAmelCase_ : List[str]="gelu" , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : Union[str, Any]=0.02 , lowerCAmelCase_ : List[Any]=1E-12 , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Union[str, Any]=2_6_2 , lowerCAmelCase_ : Union[str, Any]=2_0_4_8 , lowerCAmelCase_ : Any=5_6 , lowerCAmelCase_ : int=[3_6_8, 4_9_6] , lowerCAmelCase_ : Optional[int]=1_6 , lowerCAmelCase_ : Dict=1_9_2_0 , lowerCAmelCase_ : Optional[Any]=1_6 , lowerCAmelCase_ : Tuple=[1, 1_6, 2_2_4, 2_2_4] , **lowerCAmelCase_ : Union[str, Any] , ):
"""simple docstring"""
super().__init__(**lowerCAmelCase_)
lowercase_ = num_latents
lowercase_ = d_latents
lowercase_ = d_model
lowercase_ = num_blocks
lowercase_ = num_self_attends_per_block
lowercase_ = num_self_attention_heads
lowercase_ = num_cross_attention_heads
lowercase_ = qk_channels
lowercase_ = v_channels
lowercase_ = cross_attention_shape_for_attention
lowercase_ = self_attention_widening_factor
lowercase_ = cross_attention_widening_factor
lowercase_ = hidden_act
lowercase_ = attention_probs_dropout_prob
lowercase_ = initializer_range
lowercase_ = layer_norm_eps
lowercase_ = use_query_residual
# masked language modeling attributes
lowercase_ = vocab_size
lowercase_ = max_position_embeddings
# image classification attributes
lowercase_ = image_size
# flow attributes
lowercase_ = train_size
# multimodal autoencoding attributes
lowercase_ = num_frames
lowercase_ = audio_samples_per_frame
lowercase_ = samples_per_patch
lowercase_ = output_shape
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
@property
def _UpperCAmelCase ( self : str):
"""simple docstring"""
if self.task == "multiple-choice":
dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
dynamic_axis = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""inputs""", dynamic_axis),
("""attention_mask""", dynamic_axis),
])
@property
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
return 1E-4
def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , lowerCAmelCase_ : int = -1 , lowerCAmelCase_ : int = -1 , lowerCAmelCase_ : int = -1 , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : Optional[TensorType] = None , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : int = 4_0 , lowerCAmelCase_ : int = 4_0 , ):
"""simple docstring"""
if isinstance(lowerCAmelCase_ , lowerCAmelCase_):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
batch_size = compute_effective_axis_dimension(
lowerCAmelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0)
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
token_to_add = preprocessor.num_special_tokens_to_add(lowerCAmelCase_)
seq_length = compute_effective_axis_dimension(
lowerCAmelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=token_to_add)
# Generate dummy inputs according to compute batch and sequence
dummy_input = [""" """.join(["""a"""]) * seq_length] * batch_size
inputs = dict(preprocessor(dummy_input , return_tensors=lowerCAmelCase_))
inputs["""inputs"""] = inputs.pop("""input_ids""")
return inputs
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
batch_size = compute_effective_axis_dimension(lowerCAmelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch)
dummy_input = self._generate_dummy_images(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
inputs = dict(preprocessor(images=dummy_input , return_tensors=lowerCAmelCase_))
inputs["""inputs"""] = inputs.pop("""pixel_values""")
return inputs
else:
raise ValueError(
"""Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.""")
"""simple docstring"""
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"allenai/led-base-16384": 1_6384,
}
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = LEDTokenizer
lowercase__ = ["input_ids", "attention_mask"]
def __init__( self : Dict , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : int=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : List[Any]="replace" , lowerCAmelCase_ : Dict="<s>" , lowerCAmelCase_ : Union[str, Any]="</s>" , lowerCAmelCase_ : List[Any]="</s>" , lowerCAmelCase_ : Optional[Any]="<s>" , lowerCAmelCase_ : Union[str, Any]="<unk>" , lowerCAmelCase_ : List[str]="<pad>" , lowerCAmelCase_ : Dict="<mask>" , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : List[Any]=True , **lowerCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
super().__init__(
lowerCAmelCase_ , lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , errors=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ , trim_offsets=lowerCAmelCase_ , **lowerCAmelCase_ , )
pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get("""add_prefix_space""" , add_prefix_space) != add_prefix_space:
pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("""type"""))
pre_tok_state["""add_prefix_space"""] = add_prefix_space
self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
self.add_prefix_space = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
lowercase_ = """post_processor"""
lowercase_ = getattr(self.backend_tokenizer , lowerCAmelCase_ , lowerCAmelCase_)
if tokenizer_component_instance:
lowercase_ = json.loads(tokenizer_component_instance.__getstate__())
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowercase_ = tuple(state["""sep"""])
if "cls" in state:
lowercase_ = tuple(state["""cls"""])
lowercase_ = False
if state.get("""add_prefix_space""" , lowerCAmelCase_) != add_prefix_space:
lowercase_ = add_prefix_space
lowercase_ = True
if state.get("""trim_offsets""" , lowerCAmelCase_) != trim_offsets:
lowercase_ = trim_offsets
lowercase_ = True
if changes_to_apply:
lowercase_ = getattr(lowerCAmelCase_ , state.pop("""type"""))
lowercase_ = component_class(**lowerCAmelCase_)
setattr(self.backend_tokenizer , lowerCAmelCase_ , lowerCAmelCase_)
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""")
return None
return str(self._mask_token)
@mask_token.setter
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : str):
"""simple docstring"""
value = AddedToken(value , lstrip=True , rstrip=False) if isinstance(value , str) else value
self._mask_token = value
def _UpperCAmelCase ( self : Dict , *lowerCAmelCase_ : Union[str, Any] , **lowerCAmelCase_ : List[Any]):
"""simple docstring"""
is_split_into_words = kwargs.get("""is_split_into_words""" , False)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"""to use it with pretokenized inputs.""")
return super()._batch_encode_plus(*lowerCAmelCase_ , **lowerCAmelCase_)
def _UpperCAmelCase ( self : Union[str, Any] , *lowerCAmelCase_ : Optional[int] , **lowerCAmelCase_ : Any):
"""simple docstring"""
is_split_into_words = kwargs.get("""is_split_into_words""" , False)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"""to use it with pretokenized inputs.""")
return super()._encode_plus(*lowerCAmelCase_ , **lowerCAmelCase_)
def _UpperCAmelCase ( self : int , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None):
"""simple docstring"""
files = self._tokenizer.model.save(lowerCAmelCase_ , name=lowerCAmelCase_)
return tuple(files)
def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any]=None):
"""simple docstring"""
output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None):
"""simple docstring"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def _UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Optional[bool] = None , ):
"""simple docstring"""
encoded_inputs = super()._pad(
encoded_inputs=lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding_strategy=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , )
# Load from model defaults
if return_attention_mask is None:
return_attention_mask = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
required_input = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
needs_to_be_padded = len(encoded_inputs["""global_attention_mask"""]) != len(required_input)
if needs_to_be_padded:
difference = len(required_input) - len(encoded_inputs["""global_attention_mask"""])
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
encoded_inputs["""global_attention_mask"""] = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
encoded_inputs["""global_attention_mask"""] = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side))
return encoded_inputs
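# A minimal sketch (illustration only, not part of the original tokenizer) of the padding
# convention implemented in `_pad` above: `global_attention_mask` is padded with -1 because
# 0 already means "local attention" rather than "do not attend". The values are made up.
def _example_pad_global_attention_mask() -> list:
    global_attention_mask = [1, 0, 0]  # global attention on the first token only
    padded_input_length = 5  # length of the input ids after regular padding
    difference = padded_input_length - len(global_attention_mask)
    return global_attention_mask + [-1] * difference  # -> [1, 0, 0, -1, -1] (right padding)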
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase : int = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"A red cartoon frog, 4k\"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16\n ... )\n >>> pipe.to(\"cuda\")\n\n >>> init_image = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/frog.png\"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save(\"red_frog.png\")\n ```\n"
def downscale_height_and_width(height , width , scale_factor=8 ):
    '''simple docstring'''
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
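# Worked examples (added for illustration) for `downscale_height_and_width` above: the
# requested size is divided by scale_factor**2 with rounding up, then multiplied by
# scale_factor, i.e. the latent resolution that the movq decoder later upscales.
assert downscale_height_and_width(7_68, 7_68) == (96, 96)  # 768 // 64 = 12 -> 12 * 8 = 96
assert downscale_height_and_width(7_00, 7_00) == (88, 88)  # ceil(700 / 64) = 11 -> 11 * 8 = 88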
def prepare_image(pil_image , w=5_12 , h=5_12 ):
    '''simple docstring'''
    pil_image = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
    arr = np.array(pil_image.convert("""RGB""" ) )
    arr = arr.astype(np.float32 ) / 127.5 - 1
    arr = np.transpose(arr , [2, 0, 1] )
    image = torch.from_numpy(arr ).unsqueeze(0 )
    return image
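# A small sketch (illustration only, not part of the original pipeline) of what
# `prepare_image` produces: a float tensor in [-1, 1] with layout (1, 3, h, w).
# The solid white 2x2 input image is made up for the example.
def _example_prepare_image() -> torch.Tensor:
    pil = Image.fromarray(np.full((2, 2, 3), 255, dtype=np.uint8))
    tensor = prepare_image(pil, w=4, h=4)
    assert tuple(tensor.shape) == (1, 3, 4, 4) and float(tensor.max()) == 1.0
    return tensor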
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
def __init__( self : int , lowerCAmelCase_ : UNetaDConditionModel , lowerCAmelCase_ : DDPMScheduler , lowerCAmelCase_ : VQModel , ):
"""simple docstring"""
super().__init__()
self.register_modules(
unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , movq=lowerCAmelCase_ , )
self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
def _UpperCAmelCase ( self : Any , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any]):
"""simple docstring"""
init_timestep = min(int(num_inference_steps * strength) , lowerCAmelCase_)
t_start = max(num_inference_steps - init_timestep , 0)
timesteps = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def _UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Tuple=None):
"""simple docstring"""
if not isinstance(lowerCAmelCase_ , (torch.Tensor, PIL.Image.Image, list)):
raise ValueError(
F'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowerCAmelCase_)}''')
image = image.to(device=lowerCAmelCase_ , dtype=lowerCAmelCase_)
batch_size = batch_size * num_images_per_prompt
if image.shape[1] == 4:
init_latents = image
else:
if isinstance(lowerCAmelCase_ , lowerCAmelCase_) and len(lowerCAmelCase_) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(lowerCAmelCase_)}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''')
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_):
init_latents = [
self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(lowerCAmelCase_)
]
init_latents = torch.cat(init_latents , dim=0)
else:
init_latents = self.movq.encode(image).latent_dist.sample(lowerCAmelCase_)
init_latents = self.movq.config.scaling_factor * init_latents
init_latents = torch.cat([init_latents] , dim=0)
shape = init_latents.shape
noise = randn_tensor(shape , generator=lowerCAmelCase_ , device=lowerCAmelCase_ , dtype=lowerCAmelCase_)
# get latents
init_latents = self.scheduler.add_noise(init_latents , noise , lowerCAmelCase_)
latents = init_latents
return latents
def _UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : Tuple=0):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""")
device = torch.device(F'''cuda:{gpu_id}''')
models = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(cpu_offloaded_model , device)
def _UpperCAmelCase ( self : int , lowerCAmelCase_ : List[Any]=0):
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0"""):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""")
device = torch.device(F'''cuda:{gpu_id}''')
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=lowerCAmelCase_)
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
hook = None
for cpu_offloaded_model in [self.unet, self.movq]:
_ , hook = cpu_offload_with_hook(cpu_offloaded_model , device , prev_module_hook=hook)
# We'll offload the last model manually.
self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
if not hasattr(self.unet , """_hf_hook"""):
return self.device
for module in self.unet.modules():
if (
hasattr(lowerCAmelCase_ , """_hf_hook""")
and hasattr(module._hf_hook , """execution_device""")
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device)
return self.device
@torch.no_grad()
@replace_example_docstring(lowerCAmelCase_)
def __call__( self : int , lowerCAmelCase_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowerCAmelCase_ : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , lowerCAmelCase_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowerCAmelCase_ : int = 5_1_2 , lowerCAmelCase_ : int = 5_1_2 , lowerCAmelCase_ : int = 1_0_0 , lowerCAmelCase_ : float = 4.0 , lowerCAmelCase_ : float = 0.3 , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCAmelCase_ : Optional[str] = "pil" , lowerCAmelCase_ : bool = True , ):
"""simple docstring"""
device = self._execution_device
do_classifier_free_guidance = guidance_scale > 1.0
if isinstance(lowerCAmelCase_ , lowerCAmelCase_):
image_embeds = torch.cat(lowerCAmelCase_ , dim=0)
batch_size = image_embeds.shape[0]
if isinstance(lowerCAmelCase_ , lowerCAmelCase_):
negative_image_embeds = torch.cat(lowerCAmelCase_ , dim=0)
if do_classifier_free_guidance:
image_embeds = image_embeds.repeat_interleave(lowerCAmelCase_ , dim=0)
negative_image_embeds = negative_image_embeds.repeat_interleave(lowerCAmelCase_ , dim=0)
image_embeds = torch.cat([negative_image_embeds, image_embeds] , dim=0).to(dtype=self.unet.dtype , device=device)
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_):
image = [image]
if not all(isinstance(i , (PIL.Image.Image, torch.Tensor)) for i in image):
raise ValueError(
F'''Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor''')
image = torch.cat([prepare_image(i , lowerCAmelCase_ , lowerCAmelCase_) for i in image] , dim=0)
image = image.to(dtype=image_embeds.dtype , device=device)
latents = self.movq.encode(image)["""latents"""]
latents = latents.repeat_interleave(lowerCAmelCase_ , dim=0)
self.scheduler.set_timesteps(lowerCAmelCase_ , device=lowerCAmelCase_)
timesteps , num_inference_steps = self.get_timesteps(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
height , width = downscale_height_and_width(lowerCAmelCase_ , lowerCAmelCase_ , self.movq_scale_factor)
latents = self.prepare_latents(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , image_embeds.dtype , lowerCAmelCase_ , lowerCAmelCase_)
for i, t in enumerate(self.progress_bar(lowerCAmelCase_)):
# expand the latents if we are doing classifier free guidance
lowercase_ = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
lowercase_ = {"""image_embeds""": image_embeds}
noise_pred = self.unet(
sample=lowerCAmelCase_ , timestep=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , added_cond_kwargs=lowerCAmelCase_ , return_dict=lowerCAmelCase_ , )[0]
if do_classifier_free_guidance:
noise_pred , variance_pred = noise_pred.split(latents.shape[1] , dim=1)
noise_pred_uncond , noise_pred_text = noise_pred.chunk(2)
variance_pred_uncond , variance_pred_text = variance_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
noise_pred = torch.cat([noise_pred, variance_pred_text] , dim=1)
if not (
hasattr(self.scheduler.config , """variance_type""")
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
noise_pred , _ = noise_pred.split(latents.shape[1] , dim=1)
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ , )[0]
# post-processing
image = self.movq.decode(latents , force_not_quantize=lowerCAmelCase_)["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''')
if output_type in ["np", "pil"]:
image = image * 0.5 + 0.5
image = image.clamp(0 , 1)
image = image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
image = self.numpy_to_pil(image)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=image)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_upernet"] = [
"UperNetForSemanticSegmentation",
"UperNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from math import factorial, radians
def sin(angle_in_degrees , accuracy = 18 , rounded_values_count = 10 ) -> float:
    '''simple docstring'''
    # Simplify the angle to be between 360 and -360 degrees.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees )
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy ):
        result += (b * (angle_in_radians**a)) / factorial(a )
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result , rounded_values_count )
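# Usage examples (added for illustration) for the Maclaurin-series sine above; the expected
# values follow from the symmetry of the sine function and hold at the default accuracy.
assert sin(0.0) == 0.0
assert abs(sin(90.0) - 1.0) < 1e-9
assert abs(sin(30.0) - 0.5) < 1e-9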
if __name__ == "__main__":
__import__("doctest").testmod()
"""simple docstring"""
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester :
def __init__( self : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any]=1_3 , lowerCAmelCase_ : Optional[Any]=7 , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : int=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : Tuple=9_9 , lowerCAmelCase_ : List[str]=6_4 , lowerCAmelCase_ : Optional[int]=3_2 , lowerCAmelCase_ : Optional[Any]=5 , lowerCAmelCase_ : Optional[int]=4 , lowerCAmelCase_ : str=3_7 , lowerCAmelCase_ : Any="gelu" , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : Optional[int]=0.1 , lowerCAmelCase_ : int=5_1_2 , lowerCAmelCase_ : Optional[int]=1_6 , lowerCAmelCase_ : List[str]=2 , lowerCAmelCase_ : List[Any]=0.02 , lowerCAmelCase_ : Optional[int]=3 , lowerCAmelCase_ : Optional[Any]=4 , lowerCAmelCase_ : Dict=None , ):
"""simple docstring"""
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = seq_length
lowercase_ = is_training
lowercase_ = use_input_mask
lowercase_ = use_token_type_ids
lowercase_ = use_labels
lowercase_ = vocab_size
lowercase_ = hidden_size
lowercase_ = embedding_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_act
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = max_position_embeddings
lowercase_ = type_vocab_size
lowercase_ = type_sequence_label_size
lowercase_ = initializer_range
lowercase_ = num_labels
lowercase_ = num_choices
lowercase_ = scope
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
choice_labels = ids_tensor([self.batch_size] , self.num_choices)
config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , )
def _UpperCAmelCase ( self : Dict , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict):
"""simple docstring"""
lowercase_ = MegatronBertModel(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_)
lowercase_ = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_)
lowercase_ = model(lowerCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str]):
"""simple docstring"""
lowercase_ = MegatronBertForMaskedLM(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _UpperCAmelCase ( self : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int]):
"""simple docstring"""
lowercase_ = MegatronBertForCausalLM(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any]):
"""simple docstring"""
lowercase_ = MegatronBertForNextSentencePrediction(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2))
def _UpperCAmelCase ( self : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any]):
"""simple docstring"""
lowercase_ = MegatronBertForPreTraining(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , next_sentence_label=lowerCAmelCase_ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2))
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple):
"""simple docstring"""
lowercase_ = MegatronBertForQuestionAnswering(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _UpperCAmelCase ( self : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : int , lowerCAmelCase_ : List[str]):
"""simple docstring"""
lowercase_ = self.num_labels
lowercase_ = MegatronBertForSequenceClassification(lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _UpperCAmelCase ( self : List[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict):
"""simple docstring"""
lowercase_ = self.num_labels
lowercase_ = MegatronBertForTokenClassification(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _UpperCAmelCase ( self : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any]):
"""simple docstring"""
lowercase_ = self.num_choices
lowercase_ = MegatronBertForMultipleChoice(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
lowercase_ = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
lowercase_ = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
lowercase_ = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
lowercase__ = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
lowercase__ = (
{
"feature-extraction": MegatronBertModel,
"fill-mask": MegatronBertForMaskedLM,
"question-answering": MegatronBertForQuestionAnswering,
"text-classification": MegatronBertForSequenceClassification,
"text-generation": MegatronBertForCausalLM,
"token-classification": MegatronBertForTokenClassification,
"zero-shot": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase__ = True
# test_resize_embeddings = False
lowercase__ = False
def _UpperCAmelCase ( self : Any , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any]=False):
"""simple docstring"""
inputs_dict = super()._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_)
if return_labels:
if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
inputs_dict["""labels"""] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device)
inputs_dict["""next_sentence_label"""] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=torch_device)
return inputs_dict
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ = MegatronBertModelTester(self)
lowercase_ = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=3_7)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*lowerCAmelCase_)
def _UpperCAmelCase ( self : str):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*lowerCAmelCase_)
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*lowerCAmelCase_)
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*lowerCAmelCase_)
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*lowerCAmelCase_)
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*lowerCAmelCase_)
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*lowerCAmelCase_)
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*lowerCAmelCase_)
def _long_tensor(__lowerCAmelCase ):
    '''simple docstring'''
    return torch.tensor(
        __lowerCAmelCase , dtype=torch.long , device=torch_device , )
TOLERANCE = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    @slow
    @unittest.skip("Model is not available.")
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
"""simple docstring"""
def infix_2_postfix(infix):
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack

        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix):
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(infix)))[::-1]  # call infix_2_postfix on Infix, return reverse of Postfix


if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
"""simple docstring"""
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Decompose a square matrix into lower and upper triangular factors
    (Doolittle's method, without pivoting)."""
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper


if __name__ == "__main__":
    import doctest

    doctest.testmod()
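
# Minimal usage sketch (my addition): decompose a small matrix and verify that
# L @ U reproduces it. The routine performs no pivoting, so matrices that need
# row exchanges raise ArithmeticError instead of being decomposed.
def _demo_lu() -> None:
    matrix = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
    lower, upper = lower_upper_decomposition(matrix)
    assert np.allclose(lower @ upper, matrix)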
"""simple docstring"""
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")
class AcceleratedScheduler:
    """
    A wrapper around a learning rate scheduler that only steps when the optimizer(s) actually took a training step,
    and that steps `num_processes` times per training step when batches are not split across processes.
    """

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthroughs to the wrapped scheduler
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
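
# Hypothetical usage sketch (my addition): the optimizer and scheduler below
# are placeholders. In practice `Accelerator.prepare` builds this wrapper for
# you; standalone construction assumes `GradientState` can initialize outside
# of an `Accelerator`, which it does for the version this file targets.
def _demo_accelerated_scheduler():
    import torch

    param = torch.nn.Parameter(torch.zeros(1))
    optimizer = torch.optim.SGD([param], lr=0.1)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)
    # With step_with_optimizer=False, step() forwards straight to StepLR.step().
    wrapped = AcceleratedScheduler(scheduler, optimizer, step_with_optimizer=False)
    wrapped.step()
    print(wrapped.get_last_lr())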
"""simple docstring"""
import os
from collections.abc import Iterator
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase = "." ) -> Iterator[str]:
'''simple docstring'''
for dir_path, dir_names, filenames in os.walk(__lowerCAmelCase ):
lowercase_ = [d for d in dir_names if d != """scripts""" and d[0] not in """._"""]
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(__lowerCAmelCase )[1] in (".py", ".ipynb"):
yield os.path.join(__lowerCAmelCase , __lowerCAmelCase ).lstrip("""./""" )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
return F'''{i * " "}*''' if i else "\n##"
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> str:
'''simple docstring'''
lowercase_ = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(__lowerCAmelCase ) or old_parts[i] != new_part) and new_part:
print(F'''{md_prefix(__lowerCAmelCase )} {new_part.replace("_" , " " ).title()}''' )
return new_path
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase = "." ) -> None:
'''simple docstring'''
lowercase_ = """"""
for filepath in sorted(good_file_paths(__lowerCAmelCase ) ):
lowercase_ , lowercase_ = os.path.split(__lowerCAmelCase )
if filepath != old_path:
lowercase_ = print_path(__lowerCAmelCase , __lowerCAmelCase )
lowercase_ = (filepath.count(os.sep ) + 1) if filepath else 0
lowercase_ = F'''{filepath}/{filename}'''.replace(""" """ , """%20""" )
lowercase_ = os.path.splitext(filename.replace("""_""" , """ """ ).title() )[0]
print(F'''{md_prefix(__lowerCAmelCase )} [{filename}]({url})''' )
if __name__ == "__main__":
print_directory_md(".")
"""simple docstring"""
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """
    Helper function to read an audio file through ffmpeg.
    """
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
"""ffmpeg""",
"""-i""",
"""pipe:0""",
"""-ac""",
ac,
"""-ar""",
ar,
"""-f""",
format_for_conversion,
"""-hide_banner""",
"""-loglevel""",
"""quiet""",
"""pipe:1""",
]
    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = "f32le" , ) -> List[Any]:
'''simple docstring'''
lowercase_ = F'''{sampling_rate}'''
lowercase_ = """1"""
if format_for_conversion == "s16le":
lowercase_ = 2
elif format_for_conversion == "f32le":
lowercase_ = 4
else:
raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
lowercase_ = platform.system()
if system == "Linux":
lowercase_ = """alsa"""
lowercase_ = """default"""
elif system == "Darwin":
lowercase_ = """avfoundation"""
lowercase_ = """:0"""
elif system == "Windows":
lowercase_ = """dshow"""
lowercase_ = """default"""
lowercase_ = [
"""ffmpeg""",
"""-f""",
format_,
"""-i""",
input_,
"""-ac""",
ac,
"""-ar""",
ar,
"""-f""",
format_for_conversion,
"""-fflags""",
"""nobuffer""",
"""-hide_banner""",
"""-loglevel""",
"""quiet""",
"""pipe:1""",
]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    """
    Helper function to read audio from the microphone in (optionally overlapping) chunks.
    """
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """
    Reads raw bytes from an iterator and yields chunks of length `chunk_len`. `stride` adds overlap between chunks;
    `stream` returns partial results even when a full `chunk_len` is not yet available.
    """
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]

    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """
    Internal function to create the generator of data through ffmpeg.
    """
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
"""simple docstring"""
def bead_sort(sequence: list) -> list:
    """
    Bead sort (gravity sort) for sequences of non-negative integers.

    >>> bead_sort([6, 11, 12, 4, 1, 5])
    [1, 4, 5, 6, 11, 12]
    """
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence


if __name__ == "__main__":
    assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
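
# Small demonstration (my addition). Note this implementation always runs
# len(sequence) passes over adjacent pairs, so it costs O(n^2) even on
# already-sorted input.
def _demo_bead_sort() -> None:
    print(bead_sort([6, 11, 12, 4, 1, 5]))  # [1, 4, 5, 6, 11, 12]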
"""simple docstring"""
def naive_cut_rod_recursive(n: int, prices: list):
    """Exhaustive recursion over every first cut; exponential time."""
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revue = float("-inf")
    for i in range(1, n + 1):
        max_revue = max(max_revue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices))
    return max_revue


def top_down_cut_rod(n: int, prices: list):
    """Memoized (top-down) dynamic programming."""
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
        return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    """Iterative (bottom-up) dynamic programming."""
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)
    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
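
# Quick cross-check (my addition) on the classic CLRS price table: all three
# implementations agree on the optimal revenue of 22 for a rod of length 8,
# but only the memoized and bottom-up versions run in O(n^2) time.
def _demo_cut_rod() -> None:
    prices = [1, 5, 8, 9, 10, 17, 17, 20]
    n = len(prices)
    assert naive_cut_rod_recursive(n, prices) == 22
    assert top_down_cut_rod(n, prices) == 22
    assert bottom_up_cut_rod(n, prices) == 22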
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCAmelCase : Tuple = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class FlaxBlenderbotModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99
    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size
    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}

        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")

        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")

        generated_ids = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'

        generated_txt = tokenizer.batch_decode(generated_ids.sequences, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
UpperCAmelCase : str = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}")
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
        )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
lowercase_ = {"""train""": data_args.train_file, """validation""": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
            train_extension = data_args.train_file.split(".")[-1]
            test_extension = data_args.test_file.split(".")[-1]
            assert (
                test_extension == train_extension
            ), "`test_file` should have the same extension (csv or json) as `train_file`."
            data_files["test"] = data_args.test_file
else:
raise ValueError("""Need either a GLUE task or a test file for `do_predict`.""" )
for key in data_files.keys():
logger.info(F'''load a local file for {key}: {data_files[key]}''' )
        if data_args.train_file.endswith(".csv"):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
lowercase_ = raw_datasets["""train"""].features["""label"""].names
lowercase_ = len(__lowerCAmelCase )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        add_prefix_space=True,
    )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
# Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    # Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {"Refused": 0, "Entailed": 1}
    model.config.id2label = {0: "Refused", 1: "Entailed"}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
F'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            """Converts the flattened table text into a pandas DataFrame."""
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)
        result["label"] = examples["label"]
        return result
with training_args.main_process_first(desc="""dataset map pre-processing""" ):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function,
            batched=True,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on dataset",
        )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
lowercase_ = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
lowercase_ = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
lowercase_ = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
lowercase_ = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError("""--do_predict requires a test dataset""" )
lowercase_ = raw_datasets["""test"""]
if data_args.max_predict_samples is not None:
lowercase_ = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
if training_args.do_predict:
logger.info("""*** Predict ***""" )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)

        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict Results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")
lowercase_ = {"""finetuned_from""": model_args.model_name_or_path, """tasks""": """text-classification"""}
if training_args.push_to_hub:
trainer.push_to_hub(**__lowerCAmelCase )
else:
trainer.create_model_card(**__lowerCAmelCase )
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
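
# Example launch (my addition; the script filename, checkpoint and flags below
# are illustrative placeholders, not prescribed by this file):
#
#     python run_tabfact_with_tapex.py \
#         --model_name_or_path microsoft/tapex-base \
#         --dataset_name tab_fact \
#         --do_train --do_eval \
#         --output_dir ./tapex-tabfact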
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a code snippet."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash
def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    def __init__(
        self,
        *,
        duplication_jaccard_threshold: float = 0.85,
    ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)

        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code1: str, code2: str) -> float:
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)


_shared_dataset = None
def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes
def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(
    dataset: Type[Dataset], jaccard_threshold: float = 0.85
) -> Tuple[Type[Dataset], List[List[Dict]]]:
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
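
# Usage sketch (my addition): builds a toy dataset with the "content",
# "repo_name" and "path" columns the helpers above expect. Because the
# pipeline spawns worker processes (mp.Pool), run it under an
# `if __name__ == "__main__":` guard on platforms that do not fork.
def _demo_deduplicate():
    data = {
        "content": ["def add(a, b):\n    return a + b\n" * 3] * 2 + ["print('hi')"],
        "repo_name": ["repo_a", "repo_b", "repo_c"],
        "path": ["a.py", "b.py", "c.py"],
    }
    ds = Dataset.from_dict(data)
    ds_filtered, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)
    print(len(ds), "->", len(ds_filtered))  # the exact duplicate is dropped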
"""simple docstring"""
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C


def electric_conductivity(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
    if (conductivity, electron_conc, mobility).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif conductivity < 0:
        raise ValueError("Conductivity cannot be negative")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative")
    elif mobility < 0:
        raise ValueError("mobility cannot be negative")
    elif conductivity == 0:
        return (
            "conductivity",
            mobility * electron_conc * ELECTRON_CHARGE,
        )
    elif electron_conc == 0:
        return (
            "electron_conc",
            conductivity / (mobility * ELECTRON_CHARGE),
        )
    else:
        return (
            "mobility",
            conductivity / (electron_conc * ELECTRON_CHARGE),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
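
# Worked example (my addition): with conductivity left as the unknown (0), the
# function solves sigma = n * e * mu, here 1000 * 1.6021e-19 * 100 = 1.6021e-14.
def _demo_electric_conductivity() -> None:
    name, value = electric_conductivity(conductivity=0, electron_conc=1000, mobility=100)
    print(name, value)  # conductivity 1.6021e-14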
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def rename_state_dict_key(k):
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")

        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
'''simple docstring'''
lowercase_ = tf.train.list_variables(__lowerCAmelCase )
lowercase_ = {}
lowercase_ = ["""Adafactor""", """global_step"""]
for name, shape in tqdm(__lowerCAmelCase , desc="""converting tf checkpoint to dict""" ):
lowercase_ = any(pat in name for pat in ignore_name )
if skip_key:
continue
lowercase_ = tf.train.load_variable(__lowerCAmelCase , __lowerCAmelCase )
lowercase_ = array
return tf_weights
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> str:
'''simple docstring'''
lowercase_ = Path(__lowerCAmelCase ).parent.name
lowercase_ = task_specific_params[F'''summarization_{dataset}''']["""max_position_embeddings"""]
lowercase_ = PegasusTokenizer.from_pretrained("""sshleifer/pegasus""" , model_max_length=__lowerCAmelCase )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(__lowerCAmelCase )
# convert model
lowercase_ = get_tf_weights_as_numpy(__lowerCAmelCase )
lowercase_ = task_specific_params[F'''summarization_{dataset}''']
if dataset == "large":
lowercase_ = task_specific_params
lowercase_ = convert_pegasus(__lowerCAmelCase , __lowerCAmelCase )
torch_model.save_pretrained(__lowerCAmelCase )
lowercase_ = torch_model.state_dict()
sd.pop("""model.decoder.embed_positions.weight""" )
sd.pop("""model.encoder.embed_positions.weight""" )
torch.save(__lowerCAmelCase , Path(__lowerCAmelCase ) / """pytorch_model.bin""" )
if __name__ == "__main__":
UpperCAmelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
UpperCAmelCase : List[Any] = parser.parse_args()
if args.save_dir is None:
UpperCAmelCase : List[str] = Path(args.tf_ckpt_path).parent.name
UpperCAmelCase : int = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
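# Example invocation (illustrative only; the script name and checkpoint path
# are placeholders, not from the original file):
#
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus-aeslc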
"""simple docstring"""
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    """Sort a sequence of 0s, 1s and 2s in place with a three-way partition."""
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    """Incrementally yield primes, mapping each upcoming composite to one of
    its prime factors instead of allocating a fixed-size boolean array."""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2


if __name__ == "__main__":
    print(solution())
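    # Quick check of the incremental sieve (illustrative, not in the original):
    from itertools import islice

    print(list(islice(sieve(), 10)))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]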
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }


@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
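# To run just this test file (illustrative; the path follows the conventional
# transformers layout and may differ):
#
#   python -m pytest tests/models/glpn/test_image_processing_glpn.py -q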
"""simple docstring"""
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    """A circular FIFO queue backed by a fixed ring of doubly linked nodes."""

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self):
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data) -> None:
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self):
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")


class Node:
    def __init__(self) -> None:
        self.data = None
        self.next: Node | None = None
        self.prev: Node | None = None


if __name__ == "__main__":
    import doctest

    doctest.testmod()
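    # Illustrative usage, not from the original file:
    queue = CircularQueueLinkedList(initial_capacity=3)
    queue.enqueue("a")
    queue.enqueue("b")
    print(queue.dequeue())  # a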
"""simple docstring"""
from __future__ import annotations
from math import pi
def ind_reactance(
    inductance: float, frequency: float, reactance: float
) -> dict[str, float]:
    """Solve for whichever of the three quantities is passed in as 0."""
    if (inductance, frequency, reactance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if inductance < 0:
        raise ValueError("Inductance cannot be negative")
    if frequency < 0:
        raise ValueError("Frequency cannot be negative")
    if reactance < 0:
        raise ValueError("Inductive reactance cannot be negative")
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
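    # Illustrative usage, not from the original file: solve for the
    # reactance of a 35 mH inductor at 1 kHz (about 219.9 ohms).
    print(ind_reactance(inductance=35e-3, frequency=1e3, reactance=0))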
"""simple docstring"""
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Kadane's algorithm: track the best suffix sum and the best sum seen so far."""
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
"""simple docstring"""
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
    "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
    ".csv": ("csv", {}),
    ".tsv": ("csv", {"sep": "\t"}),
    ".json": ("json", {}),
    ".jsonl": ("json", {}),
    ".parquet": ("parquet", {}),
    ".arrow": ("arrow", {}),
    ".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
"""simple docstring"""
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
logger = logging.getLogger(__name__)
class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])

        hidden_states = layer_outputs[0]

        return hidden_states
@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)

        self.encoder = BertEncoderWithPabee(config)

        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0

        self.regression_threshold = 0
    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output

        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )
                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )
                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)
                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0
                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1
        return res
@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )
        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
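# Sketch of how the patience knob drives early exit at inference time
# (illustrative, not part of the original file; the checkpoint path is a
# placeholder):
#
#   model = BertForSequenceClassificationWithPabee.from_pretrained("path/to/checkpoint")
#   model.bert.set_patience(3)  # exit once 3 consecutive layers agree
#   model.eval()
#   logits = model(input_ids, attention_mask=attention_mask)[0]
#   model.bert.log_stats()      # prints the measured speed-up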
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
lowercase_ = None
if self.use_input_mask:
lowercase_ = random_attention_mask([self.batch_size, self.seq_length])
lowercase_ = None
if self.use_token_type_ids:
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
lowercase_ = None
lowercase_ = None
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
lowercase_ = ids_tensor([self.batch_size] , self.num_choices)
lowercase_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , use_stable_embedding=lowerCAmelCase_ , )
def _UpperCAmelCase ( self : List[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[Any]):
"""simple docstring"""
lowercase_ = OpenLlamaModel(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_)
lowercase_ = model(lowerCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _UpperCAmelCase ( self : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Tuple , ):
"""simple docstring"""
lowercase_ = True
lowercase_ = OpenLlamaModel(lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=lowerCAmelCase_ , )
lowercase_ = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , )
lowercase_ = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , ):
"""simple docstring"""
lowercase_ = OpenLlamaForCausalLM(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : str , lowerCAmelCase_ : int , ):
"""simple docstring"""
lowercase_ = True
lowercase_ = True
lowercase_ = OpenLlamaForCausalLM(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
# first forward pass
lowercase_ = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=lowerCAmelCase_ , use_cache=lowerCAmelCase_ , )
lowercase_ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowercase_ = ids_tensor((self.batch_size, 3) , config.vocab_size)
lowercase_ = ids_tensor((self.batch_size, 3) , vocab_size=2)
# append to next input_ids and
lowercase_ = torch.cat([input_ids, next_tokens] , dim=-1)
lowercase_ = torch.cat([input_mask, next_mask] , dim=-1)
lowercase_ = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , )["""hidden_states"""][0]
lowercase_ = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , )["""hidden_states"""][0]
# select random slice
lowercase_ = ids_tensor((1,) , output_from_past.shape[-1]).item()
lowercase_ = output_from_no_past[:, -3:, random_slice_idx].detach()
lowercase_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
lowercase_ = OpenLlamaModelTester(self)
lowercase_ = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=3_7)
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_)
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase_ = type
self.model_tester.create_and_check_model(*lowerCAmelCase_)
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = 3
lowercase_ = input_dict["""input_ids"""]
lowercase_ = input_ids.ne(1).to(lowerCAmelCase_)
lowercase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
lowercase_ = OpenLlamaForSequenceClassification(lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = 3
lowercase_ = """single_label_classification"""
lowercase_ = input_dict["""input_ids"""]
lowercase_ = input_ids.ne(1).to(lowerCAmelCase_)
lowercase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
lowercase_ = OpenLlamaForSequenceClassification(lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = 3
lowercase_ = """multi_label_classification"""
lowercase_ = input_dict["""input_ids"""]
lowercase_ = input_ids.ne(1).to(lowerCAmelCase_)
lowercase_ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
lowercase_ = OpenLlamaForSequenceClassification(lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""")
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)])
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : List[str]):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = ids_tensor([1, 1_0] , config.vocab_size)
lowercase_ = ids_tensor([1, int(config.max_position_embeddings * 1.5)] , config.vocab_size)
set_seed(4_2) # Fixed seed at init time so the two models get the same random weights
lowercase_ = OpenLlamaModel(lowerCAmelCase_)
original_model.to(lowerCAmelCase_)
original_model.eval()
lowercase_ = original_model(lowerCAmelCase_).last_hidden_state
lowercase_ = original_model(lowerCAmelCase_).last_hidden_state
set_seed(4_2) # Fixed seed at init time so the two models get the same random weights
lowercase_ = {"""type""": scaling_type, """factor""": 10.0}
lowercase_ = OpenLlamaModel(lowerCAmelCase_)
scaled_model.to(lowerCAmelCase_)
scaled_model.eval()
lowercase_ = scaled_model(lowerCAmelCase_).last_hidden_state
lowercase_ = scaled_model(lowerCAmelCase_).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-5))
else:
self.assertFalse(torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-5))
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-5))
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
    """Checks whether cp is the codepoint of a CJK character."""
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
        return True
    return False


def is_chinese(word: str):
    for char in word:
        char = ord(char)
        if not _is_chinese_char(char):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids


def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        type=str,
        default="./resources/chinese-demo.txt",
        help="file need process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
    )
    parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
    parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")
    args = parser.parse_args()

    main(args)
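# Sketch of the output format (illustrative, not from the original script):
# for a sentence whose BERT tokens after [CLS] are ["我", "爱", "北", "京"],
# if LTP segments "北京" as one word, "京" becomes "##京" and the reference
# file stores the positions of such "##"-continuation subwords, e.g. [4].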
"""simple docstring"""
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> "list[int]":
'''simple docstring'''
if upper_limit < 0:
raise ValueError("""Limit for the Catalan sequence must be ≥ 0""" )
lowercase_ = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
lowercase_ = 1
if upper_limit > 0:
lowercase_ = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(__lowerCAmelCase ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
if __name__ == "__main__":
print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
print("\n*** Enter -1 at any time to quit ***")
print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
try:
while True:
UpperCAmelCase : Optional[Any] = int(input().strip())
if N < 0:
print("\n********* Goodbye!! ************")
break
else:
print(F"The Catalan numbers from 0 through {N} are:")
print(catalan_numbers(N))
print("Try another upper limit for the sequence: ", end="")
except (NameError, ValueError):
print("\n********* Invalid input, goodbye! ************\n")
import doctest
doctest.testmod()
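    # Illustrative values (not from the original file):
    # catalan_numbers(5) -> [1, 1, 2, 5, 14, 42]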
"""simple docstring"""
from __future__ import annotations
def p_series(nth_term: int | str, power: int | str) -> list[str]:
    """Return the first nth_term terms of the P-series 1 + 1/2^p + 1/3^p ..."""
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
    print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
    print(p_series(nth_term, power))
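    # Illustrative output (not from the original file): p_series(5, 2)
    # -> ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']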
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by the monitored validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
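# Sketch of how these callbacks are wired into a trainer (illustrative, not
# from the original file; arguments are placeholders):
#
#   trainer = pl.Trainer(
#       callbacks=[
#           Seq2SeqLoggingCallback(),
#           get_checkpoint_callback(output_dir, metric="rouge2"),
#           get_early_stopping_callback(metric="rouge2", patience=3),
#       ],
#   )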
"""simple docstring"""
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class Test(unittest.TestCase):
def _UpperCAmelCase ( self : str):
"""simple docstring"""
lowercase_ = Vector([1, 2, 3])
self.assertEqual(x.component(0) , 1)
self.assertEqual(x.component(2) , 3)
lowercase_ = Vector()
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
lowercase_ = Vector([0, 0, 0, 0, 0, 1])
self.assertEqual(str(lowerCAmelCase_) , """(0,0,0,0,0,1)""")
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ = Vector([1, 2, 3, 4])
self.assertEqual(len(lowerCAmelCase_) , 4)
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
lowercase_ = Vector([1, 2])
lowercase_ = Vector([1, 2, 3, 4, 5])
lowercase_ = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
lowercase_ = Vector([1, -1, 1, -1, 2, -3, 4, -5])
self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3)
self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3)
self.assertEqual(z.euclidean_length() , 0)
self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = Vector([1, 2, 3])
lowercase_ = Vector([1, 1, 1])
self.assertEqual((x + y).component(0) , 2)
self.assertEqual((x + y).component(1) , 3)
self.assertEqual((x + y).component(2) , 4)
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
lowercase_ = Vector([1, 2, 3])
lowercase_ = Vector([1, 1, 1])
self.assertEqual((x - y).component(0) , 0)
self.assertEqual((x - y).component(1) , 1)
self.assertEqual((x - y).component(2) , 2)
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ = Vector([1, 2, 3])
lowercase_ = Vector([2, -1, 4]) # for test of dot product
lowercase_ = Vector([1, -2, -1])
self.assertEqual(str(x * 3.0) , """(3.0,6.0,9.0)""")
self.assertEqual((a * b) , 0)
def _UpperCAmelCase ( self : int):
"""simple docstring"""
self.assertEqual(str(zero_vector(1_0)).count("""0""") , 1_0)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
self.assertEqual(str(unit_basis_vector(3 , 1)) , """(0,1,0)""")
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
lowercase_ = Vector([1, 2, 3])
lowercase_ = Vector([1, 0, 1])
self.assertEqual(str(axpy(2 , lowerCAmelCase_ , lowerCAmelCase_)) , """(3,4,7)""")
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
lowercase_ = Vector([1, 0, 0, 0, 0, 0])
lowercase_ = x.copy()
self.assertEqual(str(lowerCAmelCase_) , str(lowerCAmelCase_))
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = Vector([1, 0, 0])
x.change_component(0 , 0)
x.change_component(1 , 1)
self.assertEqual(str(lowerCAmelCase_) , """(0,1,0)""")
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
self.assertEqual("""|1,2,3|\n|2,4,5|\n|6,7,8|\n""" , str(lowerCAmelCase_))
def _UpperCAmelCase ( self : str):
"""simple docstring"""
lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
lowercase_ = [[-3, -1_4, -1_0], [-5, -1_0, -5], [-2, -1, 0]]
for x in range(a.height()):
for y in range(a.width()):
self.assertEqual(minors[x][y] , a.minor(lowerCAmelCase_ , lowerCAmelCase_))
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
lowercase_ = [[-3, 1_4, -1_0], [5, -1_0, 5], [-2, 1, 0]]
for x in range(a.height()):
for y in range(a.width()):
self.assertEqual(cofactors[x][y] , a.cofactor(lowerCAmelCase_ , lowerCAmelCase_))
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
self.assertEqual(-5 , a.determinant())
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3)
lowercase_ = Vector([1, 2, 3])
self.assertEqual("""(14,32,50)""" , str(a * x))
self.assertEqual("""|2,4,6|\n|8,10,12|\n|14,16,18|\n""" , str(a * 2))
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
a.change_component(0 , 2 , 5)
self.assertEqual("""|1,2,5|\n|2,4,5|\n|6,7,8|\n""" , str(lowerCAmelCase_))
def _UpperCAmelCase ( self : str):
"""simple docstring"""
lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
self.assertEqual(7 , a.component(2 , 1) , 0.01)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
lowercase_ = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 1_0]] , 3 , 3)
self.assertEqual("""|2,4,10|\n|4,8,10|\n|12,14,18|\n""" , str(a + b))
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
lowercase_ = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 1_0]] , 3 , 3)
self.assertEqual("""|0,0,-4|\n|0,0,0|\n|0,0,-2|\n""" , str(a - b))
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
self.assertEqual(
"""|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n""" , str(square_zero_matrix(5)) , )
if __name__ == "__main__":
unittest.main()
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
def __init__( self : Any , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any=1_3 , lowerCAmelCase_ : Dict=7 , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : List[Any]=9_9 , lowerCAmelCase_ : str=[1, 1, 2] , lowerCAmelCase_ : List[Any]=1 , lowerCAmelCase_ : Tuple=3_2 , lowerCAmelCase_ : List[Any]=4 , lowerCAmelCase_ : Optional[int]=8 , lowerCAmelCase_ : Union[str, Any]=3_7 , lowerCAmelCase_ : List[Any]="gelu_new" , lowerCAmelCase_ : List[Any]=0.1 , lowerCAmelCase_ : List[str]=0.1 , lowerCAmelCase_ : List[Any]=0.0 , lowerCAmelCase_ : List[Any]=5_1_2 , lowerCAmelCase_ : Optional[Any]=3 , lowerCAmelCase_ : int=0.02 , lowerCAmelCase_ : str=3 , lowerCAmelCase_ : Tuple=4 , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : Union[str, Any]=False , ):
"""simple docstring"""
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = seq_length
lowercase_ = is_training
lowercase_ = use_input_mask
lowercase_ = use_token_type_ids
lowercase_ = use_labels
lowercase_ = vocab_size
lowercase_ = block_sizes
lowercase_ = num_decoder_layers
lowercase_ = d_model
lowercase_ = n_head
lowercase_ = d_head
lowercase_ = d_inner
lowercase_ = hidden_act
lowercase_ = hidden_dropout
lowercase_ = attention_dropout
lowercase_ = activation_dropout
lowercase_ = max_position_embeddings
lowercase_ = type_vocab_size
lowercase_ = 2
lowercase_ = num_labels
lowercase_ = num_choices
lowercase_ = scope
lowercase_ = initializer_std
# Used in the tests to check the size of the first attention layer
lowercase_ = n_head
# Used in the tests to check the size of the first hidden state
lowercase_ = self.d_model
# Used in the tests to check the number of output hidden states/attentions
lowercase_ = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
lowercase_ = self.num_hidden_layers + 2
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
lowercase_ = None
if self.use_input_mask:
lowercase_ = random_attention_mask([self.batch_size, self.seq_length])
lowercase_ = None
if self.use_token_type_ids:
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
lowercase_ = None
lowercase_ = None
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
lowercase_ = ids_tensor([self.batch_size] , self.num_choices)
lowercase_ = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def _UpperCAmelCase ( self : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : str , ):
"""simple docstring"""
lowercase_ = TFFunnelModel(config=lowerCAmelCase_)
lowercase_ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
lowercase_ = model(lowerCAmelCase_)
lowercase_ = [input_ids, input_mask]
lowercase_ = model(lowerCAmelCase_)
lowercase_ = model(lowerCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
lowercase_ = False
lowercase_ = TFFunnelModel(config=lowerCAmelCase_)
lowercase_ = model(lowerCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
lowercase_ = False
lowercase_ = TFFunnelModel(config=lowerCAmelCase_)
lowercase_ = model(lowerCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
def _UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Tuple , ):
"""simple docstring"""
lowercase_ = TFFunnelBaseModel(config=lowerCAmelCase_)
lowercase_ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
lowercase_ = model(lowerCAmelCase_)
lowercase_ = [input_ids, input_mask]
lowercase_ = model(lowerCAmelCase_)
lowercase_ = model(lowerCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model))
lowercase_ = False
lowercase_ = TFFunnelBaseModel(config=lowerCAmelCase_)
lowercase_ = model(lowerCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model))
lowercase_ = False
lowercase_ = TFFunnelBaseModel(config=lowerCAmelCase_)
lowercase_ = model(lowerCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model))
def _UpperCAmelCase ( self : Any , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int , ):
"""simple docstring"""
lowercase_ = TFFunnelForPreTraining(config=lowerCAmelCase_)
lowercase_ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
lowercase_ = model(lowerCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length))
def _UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : str , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any] , ):
"""simple docstring"""
lowercase_ = TFFunnelForMaskedLM(config=lowerCAmelCase_)
lowercase_ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
lowercase_ = model(lowerCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Any , ):
"""simple docstring"""
lowercase_ = self.num_labels
lowercase_ = TFFunnelForSequenceClassification(config=lowerCAmelCase_)
lowercase_ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
lowercase_ = model(lowerCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
lowercase_ = self.num_choices
lowercase_ = TFFunnelForMultipleChoice(config=lowerCAmelCase_)
lowercase_ = tf.tile(tf.expand_dims(lowerCAmelCase_ , 1) , (1, self.num_choices, 1))
lowercase_ = tf.tile(tf.expand_dims(lowerCAmelCase_ , 1) , (1, self.num_choices, 1))
lowercase_ = tf.tile(tf.expand_dims(lowerCAmelCase_ , 1) , (1, self.num_choices, 1))
lowercase_ = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
lowercase_ = model(lowerCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def _UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple , ):
"""simple docstring"""
lowercase_ = self.num_labels
lowercase_ = TFFunnelForTokenClassification(config=lowerCAmelCase_)
lowercase_ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
lowercase_ = model(lowerCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : str , ):
"""simple docstring"""
lowercase_ = TFFunnelForQuestionAnswering(config=lowerCAmelCase_)
lowercase_ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
lowercase_ = model(lowerCAmelCase_)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ = self.prepare_config_and_inputs()
( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) = config_and_inputs
lowercase_ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
lowercase__ = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
lowercase__ = (
{
"feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
"fill-mask": TFFunnelForMaskedLM,
"question-answering": TFFunnelForQuestionAnswering,
"text-classification": TFFunnelForSequenceClassification,
"token-classification": TFFunnelForTokenClassification,
"zero-shot": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
lowercase__ = False
lowercase__ = False
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
lowercase_ = TFFunnelModelTester(self)
lowercase_ = ConfigTester(self , config_class=lowerCAmelCase_)
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_)
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCAmelCase_)
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase_)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase_)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase_)
@require_tf
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , unittest.TestCase ):
lowercase__ = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
lowercase__ = False
lowercase__ = False
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
lowercase_ = TFFunnelModelTester(self , base=lowerCAmelCase_)
lowercase_ = ConfigTester(self , config_class=lowerCAmelCase_)
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*lowerCAmelCase_)
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase_)
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCAmelCase_)
| 313
|
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> int:
'''simple docstring'''
lowercase_ = 0
if start < end:
lowercase_ = randint(__lowerCAmelCase , __lowerCAmelCase )
lowercase_ = a[end]
lowercase_ = a[pivot]
lowercase_ = temp
lowercase_ , lowercase_ = _in_place_partition(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
count += _in_place_quick_sort(__lowerCAmelCase , __lowerCAmelCase , p - 1 )
count += _in_place_quick_sort(__lowerCAmelCase , p + 1 , __lowerCAmelCase )
return count
def _in_place_partition (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> tuple:
'''simple docstring'''
lowercase_ = 0
lowercase_ = randint(__lowerCAmelCase , __lowerCAmelCase )
lowercase_ = a[end]
lowercase_ = a[pivot]
lowercase_ = temp
lowercase_ = start - 1
for index in range(__lowerCAmelCase , __lowerCAmelCase ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
lowercase_ = new_pivot_index + 1
lowercase_ = a[new_pivot_index]
lowercase_ = a[index]
lowercase_ = temp
lowercase_ = a[new_pivot_index + 1]
lowercase_ = a[end]
lowercase_ = temp
return new_pivot_index + 1, count
UpperCAmelCase : Union[str, Any] = TemporaryFile()
UpperCAmelCase : Optional[int] = 100  # 100 elements are to be sorted
UpperCAmelCase , UpperCAmelCase : List[str] = 0, 1 # mean and standard deviation
UpperCAmelCase : Optional[Any] = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)
outfile.seek(0) # using the same array
UpperCAmelCase : List[str] = np.load(outfile)
UpperCAmelCase : List[Any] = len(M) - 1
UpperCAmelCase : Optional[int] = _in_place_quick_sort(M, 0, r)
print(
"No of Comparisons for 100 elements selected from a standard normal distribution"
"is :"
)
print(z)
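# Illustrative sanity check (an addition, not part of the original script):
# a randomized quicksort performs roughly 2 * n * ln(n) comparisons on
# average, so for the n = 100 elements above, z should be on the order of ~921.
import math

print("Expected comparisons (average case):", round(2 * 100 * math.log(100)))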
| 313
| 1
|
"""simple docstring"""
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( enum.Enum ):
lowercase__ = 0
lowercase__ = 1
@add_end_docstrings(__UpperCAmelCase )
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = "generated"
def __init__( self : Union[str, Any] , *lowerCAmelCase_ : int , **lowerCAmelCase_ : str):
"""simple docstring"""
super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_)
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == """tf"""
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING)
def _UpperCAmelCase ( self : List[Any] , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : str=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : List[Any]=None , **lowerCAmelCase_ : Tuple , ):
"""simple docstring"""
lowercase_ = {}
if truncation is not None:
lowercase_ = truncation
lowercase_ = generate_kwargs
lowercase_ = {}
if return_tensors is not None and return_type is None:
lowercase_ = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
lowercase_ = return_type
if clean_up_tokenization_spaces is not None:
lowercase_ = clean_up_tokenization_spaces
if stop_sequence is not None:
lowercase_ = self.tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_)
if len(lowerCAmelCase_) > 1:
warnings.warn(
"""Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
""" the stop sequence will be used as the stop sequence string in the interim.""")
lowercase_ = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def _UpperCAmelCase ( self : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int):
"""simple docstring"""
return True
def _UpperCAmelCase ( self : Optional[Any] , *lowerCAmelCase_ : int , lowerCAmelCase_ : Dict):
"""simple docstring"""
lowercase_ = self.model.config.prefix if self.model.config.prefix is not None else """"""
if isinstance(args[0] , lowerCAmelCase_):
if self.tokenizer.pad_token_id is None:
raise ValueError("""Please make sure that the tokenizer has a pad_token_id when using a batch input""")
lowercase_ = ([prefix + arg for arg in args[0]],)
lowercase_ = True
elif isinstance(args[0] , lowerCAmelCase_):
lowercase_ = (prefix + args[0],)
lowercase_ = False
else:
raise ValueError(
F''' `args[0]`: {args[0]} has the wrong format. It should be either of type `str` or type `list`''')
lowercase_ = self.tokenizer(*lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors=self.framework)
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self : Union[str, Any] , *lowerCAmelCase_ : str , **lowerCAmelCase_ : Any):
"""simple docstring"""
lowercase_ = super().__call__(*lowerCAmelCase_ , **lowerCAmelCase_)
if (
isinstance(args[0] , lowerCAmelCase_)
and all(isinstance(lowerCAmelCase_ , lowerCAmelCase_) for el in args[0])
and all(len(lowerCAmelCase_) == 1 for res in result)
):
return [res[0] for res in result]
return result
def _UpperCAmelCase ( self : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : List[str]=TruncationStrategy.DO_NOT_TRUNCATE , **lowerCAmelCase_ : Dict):
"""simple docstring"""
lowercase_ = self._parse_and_tokenize(lowerCAmelCase_ , truncation=lowerCAmelCase_ , **lowerCAmelCase_)
return inputs
def _UpperCAmelCase ( self : Optional[int] , lowerCAmelCase_ : List[str] , **lowerCAmelCase_ : Optional[Any]):
"""simple docstring"""
if self.framework == "pt":
lowercase_ , lowercase_ = model_inputs["""input_ids"""].shape
elif self.framework == "tf":
lowercase_ , lowercase_ = tf.shape(model_inputs["""input_ids"""]).numpy()
lowercase_ = generate_kwargs.get("""min_length""" , self.model.config.min_length)
lowercase_ = generate_kwargs.get("""max_length""" , self.model.config.max_length)
self.check_inputs(lowerCAmelCase_ , generate_kwargs["""min_length"""] , generate_kwargs["""max_length"""])
lowercase_ = self.model.generate(**lowerCAmelCase_ , **lowerCAmelCase_)
lowercase_ = output_ids.shape[0]
if self.framework == "pt":
lowercase_ = output_ids.reshape(lowerCAmelCase_ , out_b // in_b , *output_ids.shape[1:])
elif self.framework == "tf":
lowercase_ = tf.reshape(lowerCAmelCase_ , (in_b, out_b // in_b, *output_ids.shape[1:]))
return {"output_ids": output_ids}
def _UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str=ReturnType.TEXT , lowerCAmelCase_ : int=False):
"""simple docstring"""
lowercase_ = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
lowercase_ = {F'''{self.return_name}_token_ids''': output_ids}
elif return_type == ReturnType.TEXT:
lowercase_ = {
F'''{self.return_name}_text''': self.tokenizer.decode(
lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ , )
}
records.append(lowerCAmelCase_)
return records
@add_end_docstrings(__UpperCAmelCase )
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = "summary"
def __call__( self : Union[str, Any] , *lowerCAmelCase_ : Any , **lowerCAmelCase_ : Any):
"""simple docstring"""
return super().__call__(*lowerCAmelCase_ , **lowerCAmelCase_)
def _UpperCAmelCase ( self : Tuple , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int):
"""simple docstring"""
if max_length < min_length:
logger.warning(F'''Your min_length={min_length} must be less than your max_length={max_length}.''')
if input_length < max_length:
logger.warning(
F'''Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '''
"""a summarization task, where outputs shorter than the input are typically wanted, you might """
F'''consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})''')
@add_end_docstrings(__UpperCAmelCase )
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = "translation"
def _UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int):
"""simple docstring"""
if input_length > 0.9 * max_length:
logger.warning(
F'''Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '''
"""increasing your max_length manually, e.g. translator('...', max_length=400)""")
return True
def _UpperCAmelCase ( self : Union[str, Any] , *lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Any=TruncationStrategy.DO_NOT_TRUNCATE , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : Tuple=None):
"""simple docstring"""
if getattr(self.tokenizer , """_build_translation_inputs""" , lowerCAmelCase_):
return self.tokenizer._build_translation_inputs(
*lowerCAmelCase_ , return_tensors=self.framework , truncation=lowerCAmelCase_ , src_lang=lowerCAmelCase_ , tgt_lang=lowerCAmelCase_)
else:
return super()._parse_and_tokenize(*lowerCAmelCase_ , truncation=lowerCAmelCase_)
def _UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : int=None , **lowerCAmelCase_ : Tuple):
"""simple docstring"""
lowercase_ , lowercase_ , lowercase_ = super()._sanitize_parameters(**lowerCAmelCase_)
if src_lang is not None:
lowercase_ = src_lang
if tgt_lang is not None:
lowercase_ = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
lowercase_ = kwargs.get("""task""" , self.task)
lowercase_ = task.split("""_""")
if task and len(lowerCAmelCase_) == 4:
# translation, XX, to YY
lowercase_ = items[1]
lowercase_ = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self : int , *lowerCAmelCase_ : int , **lowerCAmelCase_ : Union[str, Any]):
"""simple docstring"""
return super().__call__(*lowerCAmelCase_ , **lowerCAmelCase_)
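# Hypothetical usage sketch (the `pipeline` factory is the standard entry
# point for the classes above; kept commented out because it downloads
# model weights):
# from transformers import pipeline
# summarizer = pipeline("summarization")
# summarizer("A long article about transformers ...", min_length=5, max_length=20)
# translator = pipeline("translation_en_to_fr")
# translator("How old are you?", max_length=40)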
| 313
|
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys (__lowerCAmelCase , __lowerCAmelCase=False ) -> list:
'''simple docstring'''
lowercase_ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''module.blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''module.blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""module.cls_token""", """vit.embeddings.cls_token"""),
("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""module.pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""module.norm.weight""", """layernorm.weight"""),
("""module.norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowercase_ = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False ) -> None:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
lowercase_ = """"""
else:
lowercase_ = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase_ = state_dict.pop(F'''module.blocks.{i}.attn.qkv.weight''' )
lowercase_ = state_dict.pop(F'''module.blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowercase_ = in_proj_weight[
: config.hidden_size, :
]
lowercase_ = in_proj_bias[: config.hidden_size]
lowercase_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase_ = in_proj_weight[
-config.hidden_size :, :
]
lowercase_ = in_proj_bias[-config.hidden_size :]
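# Illustrative sketch (an addition, not part of the original conversion
# script): the fused qkv projection popped above stacks query, key and value
# along dim 0, so for a hypothetical hidden_size of 4 the three slices each
# recover a (4, 4) matrix.
def _demo_qkv_split(hidden_size: int = 4) -> None:
    fused_weight = torch.randn(3 * hidden_size, hidden_size)
    query = fused_weight[:hidden_size, :]
    key = fused_weight[hidden_size : hidden_size * 2, :]
    value = fused_weight[-hidden_size:, :]
    assert query.shape == key.shape == value.shape == (hidden_size, hidden_size)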
def remove_classification_head_ (__lowerCAmelCase ) -> None:
'''simple docstring'''
lowercase_ = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
def remove_projection_head (__lowerCAmelCase ) -> None:
'''simple docstring'''
lowercase_ = [
"""module.fc.fc1.weight""",
"""module.fc.fc1.bias""",
"""module.fc.bn1.weight""",
"""module.fc.bn1.bias""",
"""module.fc.bn1.running_mean""",
"""module.fc.bn1.running_var""",
"""module.fc.bn1.num_batches_tracked""",
"""module.fc.fc2.weight""",
"""module.fc.fc2.bias""",
"""module.fc.bn2.weight""",
"""module.fc.bn2.bias""",
"""module.fc.bn2.running_mean""",
"""module.fc.bn2.running_var""",
"""module.fc.bn2.num_batches_tracked""",
"""module.fc.fc3.weight""",
"""module.fc.fc3.bias""",
]
for k in ignore_keys:
state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
def rename_key (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> None:
'''simple docstring'''
lowercase_ = dct.pop(__lowerCAmelCase )
lowercase_ = val
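# Illustrative sketch (an addition): rename_key above implements a
# dict.pop-and-reassign move for each (src, dest) pair produced by
# create_rename_keys. The standalone equivalent is:
def _demo_rename_key() -> None:
    state_dict = {"module.cls_token": torch.zeros(1)}
    state_dict["vit.embeddings.cls_token"] = state_dict.pop("module.cls_token")
    assert "module.cls_token" not in state_dict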
def convert_vit_msn_checkpoint (__lowerCAmelCase , __lowerCAmelCase ) -> None:
'''simple docstring'''
lowercase_ = ViTMSNConfig()
lowercase_ = 10_00
lowercase_ = """datasets/huggingface/label-files"""
lowercase_ = """imagenet-1k-id2label.json"""
lowercase_ = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase ) , """r""" ) )
lowercase_ = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
lowercase_ = idalabel
lowercase_ = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
lowercase_ = 3_84
lowercase_ = 15_36
lowercase_ = 6
elif "l16" in checkpoint_url:
lowercase_ = 10_24
lowercase_ = 40_96
lowercase_ = 24
lowercase_ = 16
lowercase_ = 0.1
elif "b4" in checkpoint_url:
lowercase_ = 4
elif "l7" in checkpoint_url:
lowercase_ = 7
lowercase_ = 10_24
lowercase_ = 40_96
lowercase_ = 24
lowercase_ = 16
lowercase_ = 0.1
lowercase_ = ViTMSNModel(__lowerCAmelCase )
lowercase_ = torch.hub.load_state_dict_from_url(__lowerCAmelCase , map_location="""cpu""" )["""target_encoder"""]
lowercase_ = ViTImageProcessor(size=config.image_size )
remove_projection_head(__lowerCAmelCase )
lowercase_ = create_rename_keys(__lowerCAmelCase , base_model=__lowerCAmelCase )
for src, dest in rename_keys:
rename_key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
read_in_q_k_v(__lowerCAmelCase , __lowerCAmelCase , base_model=__lowerCAmelCase )
model.load_state_dict(__lowerCAmelCase )
model.eval()
lowercase_ = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowercase_ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw )
lowercase_ = ViTImageProcessor(
size=config.image_size , image_mean=__lowerCAmelCase , image_std=__lowerCAmelCase )
lowercase_ = image_processor(images=__lowerCAmelCase , return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
lowercase_ = model(**__lowerCAmelCase )
lowercase_ = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
lowercase_ = torch.tensor([[-1.0_915, -1.4_876, -1.1_809]] )
elif "b16" in checkpoint_url:
lowercase_ = torch.tensor([[14.2_889, -18.9_045, 11.7_281]] )
elif "l16" in checkpoint_url:
lowercase_ = torch.tensor([[41.5_028, -22.8_681, 45.6_475]] )
elif "b4" in checkpoint_url:
lowercase_ = torch.tensor([[-4.3_868, 5.2_932, -0.4_137]] )
else:
lowercase_ = torch.tensor([[-0.1_792, -0.6_465, 2.4_263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , __lowerCAmelCase , atol=1E-4 )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(__lowerCAmelCase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
UpperCAmelCase : Tuple = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
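# Example invocation (illustrative; the script filename and output path are
# assumptions, the checkpoint URL is the default above):
# python convert_vit_msn_to_pytorch.py \
#     --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#     --pytorch_dump_folder_path ./vit-msn-small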
| 313
| 1
|
"""simple docstring"""
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("0.12.2"):
raise Exception("requires fairseq >= 0.12.2")
if version.parse(fairseq.__version__) > version.parse("2"):
raise Exception("requires fairseq < v2")
logging.set_verbosity_info()
UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase : Union[str, Any] = "Hello, World!"
UpperCAmelCase : Optional[Any] = "en_XX"
def convert_xmod_checkpoint_to_pytorch (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> None:
'''simple docstring'''
lowercase_ = Path("""data_bin""" )
lowercase_ = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(__lowerCAmelCase ).parent ) , checkpoint_file=Path(__lowerCAmelCase ).name , _name="""xmod_base""" , arch="""xmod_base""" , task="""multilingual_masked_lm""" , data_name_or_path=str(__lowerCAmelCase ) , bpe="""sentencepiece""" , sentencepiece_model=str(Path(__lowerCAmelCase ).parent / """sentencepiece.bpe.model""" ) , src_dict=str(data_dir / """dict.txt""" ) , )
xmod.eval() # disable dropout
print(__lowerCAmelCase )
lowercase_ = xmod.model.encoder.sentence_encoder
lowercase_ = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , """bottleneck""" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
lowercase_ = xmod.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
print("""Our X-MOD config:""" , __lowerCAmelCase )
lowercase_ = XmodForSequenceClassification(__lowerCAmelCase ) if classification_head else XmodForMaskedLM(__lowerCAmelCase )
model.eval()
# Now let's copy all the weights.
# Embeddings
lowercase_ = xmod_sent_encoder.embed_tokens.weight
lowercase_ = xmod_sent_encoder.embed_positions.weight
lowercase_ = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
lowercase_ = xmod_sent_encoder.layernorm_embedding.weight
lowercase_ = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
lowercase_ = model.roberta.encoder.layer[i]
lowercase_ = xmod_sent_encoder.layers[i]
# self attention
lowercase_ = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("""Dimensions of self-attention weights do not match.""" )
lowercase_ = xmod_layer.self_attn.q_proj.weight
lowercase_ = xmod_layer.self_attn.q_proj.bias
lowercase_ = xmod_layer.self_attn.k_proj.weight
lowercase_ = xmod_layer.self_attn.k_proj.bias
lowercase_ = xmod_layer.self_attn.v_proj.weight
lowercase_ = xmod_layer.self_attn.v_proj.bias
# self-attention output
lowercase_ = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("""Dimensions of self-attention output weights do not match.""" )
lowercase_ = xmod_layer.self_attn.out_proj.weight
lowercase_ = xmod_layer.self_attn.out_proj.bias
lowercase_ = xmod_layer.self_attn_layer_norm.weight
lowercase_ = xmod_layer.self_attn_layer_norm.bias
# intermediate
lowercase_ = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("""Dimensions of intermediate weights do not match.""" )
lowercase_ = xmod_layer.fca.weight
lowercase_ = xmod_layer.fca.bias
# output
lowercase_ = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("""Dimensions of feed-forward weights do not match.""" )
lowercase_ = xmod_layer.fca.weight
lowercase_ = xmod_layer.fca.bias
lowercase_ = xmod_layer.final_layer_norm.weight
lowercase_ = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
lowercase_ = xmod_layer.adapter_layer_norm.weight
lowercase_ = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("""Lists of language adapters do not match.""" )
for lang_code, adapter in xmod_layer.adapter_modules.items():
lowercase_ = bert_output.adapter_modules[lang_code]
lowercase_ = xmod_layer.adapter_modules[lang_code]
lowercase_ = from_adapter.fca.weight
lowercase_ = from_adapter.fca.bias
lowercase_ = from_adapter.fca.weight
lowercase_ = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
lowercase_ = xmod_sent_encoder.layer_norm.weight
lowercase_ = xmod_sent_encoder.layer_norm.bias
if classification_head:
lowercase_ = xmod.model.classification_heads["""mnli"""].dense.weight
lowercase_ = xmod.model.classification_heads["""mnli"""].dense.bias
lowercase_ = xmod.model.classification_heads["""mnli"""].out_proj.weight
lowercase_ = xmod.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
lowercase_ = xmod.model.encoder.lm_head.dense.weight
lowercase_ = xmod.model.encoder.lm_head.dense.bias
lowercase_ = xmod.model.encoder.lm_head.layer_norm.weight
lowercase_ = xmod.model.encoder.lm_head.layer_norm.bias
lowercase_ = xmod.model.encoder.lm_head.weight
lowercase_ = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
lowercase_ = xmod.encode(__lowerCAmelCase ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(__lowerCAmelCase )
lowercase_ = model(__lowerCAmelCase )[0]
if classification_head:
lowercase_ = xmod.model.classification_heads["""mnli"""](xmod.extract_features(__lowerCAmelCase ) )
else:
lowercase_ = xmod.model(__lowerCAmelCase , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
lowercase_ = torch.max(torch.abs(our_output - their_output ) ).item()
print(F'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
lowercase_ = torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-3 )
print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""" )
if not success:
raise Exception("""Something went wRoNg""" )
Path(__lowerCAmelCase ).mkdir(parents=__lowerCAmelCase , exist_ok=__lowerCAmelCase )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xmod_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
UpperCAmelCase : Any = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
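# Example invocation (illustrative; the script filename and both paths are
# assumptions):
# python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
#     --xmod_checkpoint_path ./xmod_base/model.pt \
#     --pytorch_dump_folder_path ./xmod-base \
#     --classification_head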
| 313
|
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
UpperCAmelCase : Tuple = logging.get_logger(__name__)
UpperCAmelCase : Optional[int] = {
"deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = "perceiver"
def __init__( self : Optional[int] , lowerCAmelCase_ : List[str]=2_5_6 , lowerCAmelCase_ : Dict=1_2_8_0 , lowerCAmelCase_ : List[Any]=7_6_8 , lowerCAmelCase_ : Optional[Any]=1 , lowerCAmelCase_ : List[Any]=2_6 , lowerCAmelCase_ : Optional[Any]=8 , lowerCAmelCase_ : Tuple=8 , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : Optional[Any]="kv" , lowerCAmelCase_ : Dict=1 , lowerCAmelCase_ : Optional[Any]=1 , lowerCAmelCase_ : List[str]="gelu" , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : Union[str, Any]=0.02 , lowerCAmelCase_ : List[Any]=1E-12 , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Union[str, Any]=2_6_2 , lowerCAmelCase_ : Union[str, Any]=2_0_4_8 , lowerCAmelCase_ : Any=5_6 , lowerCAmelCase_ : int=[3_6_8, 4_9_6] , lowerCAmelCase_ : Optional[int]=1_6 , lowerCAmelCase_ : Dict=1_9_2_0 , lowerCAmelCase_ : Optional[Any]=1_6 , lowerCAmelCase_ : Tuple=[1, 1_6, 2_2_4, 2_2_4] , **lowerCAmelCase_ : Union[str, Any] , ):
"""simple docstring"""
super().__init__(**lowerCAmelCase_)
lowercase_ = num_latents
lowercase_ = d_latents
lowercase_ = d_model
lowercase_ = num_blocks
lowercase_ = num_self_attends_per_block
lowercase_ = num_self_attention_heads
lowercase_ = num_cross_attention_heads
lowercase_ = qk_channels
lowercase_ = v_channels
lowercase_ = cross_attention_shape_for_attention
lowercase_ = self_attention_widening_factor
lowercase_ = cross_attention_widening_factor
lowercase_ = hidden_act
lowercase_ = attention_probs_dropout_prob
lowercase_ = initializer_range
lowercase_ = layer_norm_eps
lowercase_ = use_query_residual
# masked language modeling attributes
lowercase_ = vocab_size
lowercase_ = max_position_embeddings
# image classification attributes
lowercase_ = image_size
# flow attributes
lowercase_ = train_size
# multimodal autoencoding attributes
lowercase_ = num_frames
lowercase_ = audio_samples_per_frame
lowercase_ = samples_per_patch
lowercase_ = output_shape
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
@property
def _UpperCAmelCase ( self : str):
"""simple docstring"""
if self.task == "multiple-choice":
lowercase_ = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowercase_ = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""inputs""", dynamic_axis),
("""attention_mask""", dynamic_axis),
])
@property
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
return 1E-4
def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , lowerCAmelCase_ : int = -1 , lowerCAmelCase_ : int = -1 , lowerCAmelCase_ : int = -1 , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : Optional[TensorType] = None , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : int = 4_0 , lowerCAmelCase_ : int = 4_0 , ):
"""simple docstring"""
if isinstance(lowerCAmelCase_ , lowerCAmelCase_):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase_ = compute_effective_axis_dimension(
lowerCAmelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0)
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowercase_ = preprocessor.num_special_tokens_to_add(lowerCAmelCase_)
lowercase_ = compute_effective_axis_dimension(
lowerCAmelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCAmelCase_)
# Generate dummy inputs according to compute batch and sequence
lowercase_ = [""" """.join(["""a"""]) * seq_length] * batch_size
lowercase_ = dict(preprocessor(lowerCAmelCase_ , return_tensors=lowerCAmelCase_))
lowercase_ = inputs.pop("""input_ids""")
return inputs
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase_ = compute_effective_axis_dimension(lowerCAmelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch)
lowercase_ = self._generate_dummy_images(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = dict(preprocessor(images=lowerCAmelCase_ , return_tensors=lowerCAmelCase_))
lowercase_ = inputs.pop("""pixel_values""")
return inputs
else:
raise ValueError(
"""Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.""")
| 313
| 1
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , unittest.TestCase ):
lowercase__ = BioGptTokenizer
lowercase__ = False
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase_ = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
lowercase_ = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_))))
lowercase_ = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
lowercase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
lowercase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""])
with open(self.vocab_file , """w""") as fp:
fp.write(json.dumps(lowerCAmelCase_))
with open(self.merges_file , """w""") as fp:
fp.write("""\n""".join(lowerCAmelCase_))
def _UpperCAmelCase ( self : int , lowerCAmelCase_ : int):
"""simple docstring"""
lowercase_ = """lower newer"""
lowercase_ = """lower newer"""
return input_text, output_text
def _UpperCAmelCase ( self : str):
"""simple docstring"""
lowercase_ = BioGptTokenizer(self.vocab_file , self.merges_file)
lowercase_ = """lower"""
lowercase_ = ["""low""", """er</w>"""]
lowercase_ = tokenizer.tokenize(lowerCAmelCase_)
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = tokens + ["""<unk>"""]
lowercase_ = [1_4, 1_5, 2_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_) , lowerCAmelCase_)
@slow
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ = BioGptTokenizer.from_pretrained("""microsoft/biogpt""")
lowercase_ = tokenizer.encode("""sequence builders""" , add_special_tokens=lowerCAmelCase_)
lowercase_ = tokenizer.encode("""multi-sequence build""" , add_special_tokens=lowerCAmelCase_)
lowercase_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_)
lowercase_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_)
self.assertTrue(encoded_sentence == [2] + text)
self.assertTrue(encoded_pair == [2] + text + [2] + text_a)
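# Illustrative note: with the merges defined in setUp, "lower" is first split
# into characters, then merged bottom-up — "l o" -> "lo", "lo w" -> "low" —
# and the word-final pair "e r</w>" -> "er</w>", which yields exactly the
# ["low", "er</w>"] tokenization asserted above.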
| 313
|
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , unittest.TestCase ):
lowercase__ = BarthezTokenizer
lowercase__ = BarthezTokenizerFast
lowercase__ = True
lowercase__ = True
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
super().setUp()
lowercase_ = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""")
tokenizer.save_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname , legacy_format=lowerCAmelCase_)
lowercase_ = tokenizer
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
lowercase_ = """<pad>"""
lowercase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase_) , lowerCAmelCase_)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase_) , lowerCAmelCase_)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , """<s>""")
self.assertEqual(vocab_keys[1] , """<pad>""")
self.assertEqual(vocab_keys[-1] , """<mask>""")
self.assertEqual(len(lowerCAmelCase_) , 1_0_1_1_2_2)
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_1_1_2_2)
@require_torch
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
lowercase_ = [0, 5_7, 3_0_1_8, 7_0_3_0_7, 9_1, 2]
lowercase_ = self.tokenizer(
lowerCAmelCase_ , max_length=len(lowerCAmelCase_) , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors="""pt""")
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_)
self.assertEqual((2, 6) , batch.input_ids.shape)
self.assertEqual((2, 6) , batch.attention_mask.shape)
lowercase_ = batch.input_ids.tolist()[0]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_)
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
lowercase_ = self.get_tokenizer()
lowercase_ = self.get_rust_tokenizer()
lowercase_ = """I was born in 92000, and this is falsé."""
lowercase_ = tokenizer.tokenize(lowerCAmelCase_)
lowercase_ = rust_tokenizer.tokenize(lowerCAmelCase_)
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_)
lowercase_ = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_)
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = self.get_rust_tokenizer()
lowercase_ = tokenizer.encode(lowerCAmelCase_)
lowercase_ = rust_tokenizer.encode(lowerCAmelCase_)
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_)
@slow
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ = {"""input_ids""": [[0, 4_9_0, 1_4_3_2_8, 4_5_0_7, 3_5_4, 4_7, 4_3_6_6_9, 9_5, 2_5, 7_8_1_1_7, 2_0_2_1_5, 1_9_7_7_9, 1_9_0, 2_2, 4_0_0, 4, 3_5_3_4_3, 8_0_3_1_0, 6_0_3, 8_6, 2_4_9_3_7, 1_0_5, 3_3_4_3_8, 9_4_7_6_2, 1_9_6, 3_9_6_4_2, 7, 1_5, 1_5_9_3_3, 1_7_3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0_5_3_4, 8_7, 2_5, 6_6, 3_3_5_8, 1_9_6, 5_5_2_8_9, 8, 8_2_9_6_1, 8_1, 2_2_0_4, 7_5_2_0_3, 7, 1_5, 7_6_3, 1_2_9_5_6, 2_1_6, 1_7_8, 1_4_3_2_8, 9_5_9_5, 1_3_7_7, 6_9_6_9_3, 7, 4_4_8, 7_1_0_2_1, 1_9_6, 1_8_1_0_6, 1_4_3_7, 1_3_9_7_4, 1_0_8, 9_0_8_3, 4, 4_9_3_1_5, 7, 3_9, 8_6, 1_3_2_6, 2_7_9_3, 4_6_3_3_3, 4, 4_4_8, 1_9_6, 7_4_5_8_8, 7, 4_9_3_1_5, 7, 3_9, 2_1, 8_2_2, 3_8_4_7_0, 7_4, 2_1, 6_6_7_2_3, 6_2_4_8_0, 8, 2_2_0_5_0, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
lowercase_ = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase_ , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=lowerCAmelCase_ , )
| 313
| 1
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , unittest.TestCase ):
lowercase__ = KandinskyVaaControlnetImgaImgPipeline
lowercase__ = ["image_embeds", "negative_image_embeds", "image", "hint"]
lowercase__ = ["image_embeds", "negative_image_embeds", "image", "hint"]
lowercase__ = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
lowercase__ = False
@property
def _UpperCAmelCase ( self : str):
"""simple docstring"""
return 3_2
@property
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
return 3_2
@property
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
return self.time_input_dim
@property
def _UpperCAmelCase ( self : str):
"""simple docstring"""
return self.time_input_dim * 4
@property
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
return 1_0_0
@property
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
torch.manual_seed(0)
lowercase_ = {
"""in_channels""": 8,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
lowercase_ = UNetaDConditionModel(**lowerCAmelCase_)
return model
@property
def _UpperCAmelCase ( self : int):
"""simple docstring"""
return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
torch.manual_seed(0)
lowercase_ = VQModel(**self.dummy_movq_kwargs)
return model
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = self.dummy_unet
lowercase_ = self.dummy_movq
lowercase_ = {
"""num_train_timesteps""": 1_0_0_0,
"""beta_schedule""": """linear""",
"""beta_start""": 0.00_085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
lowercase_ = DDIMScheduler(**lowerCAmelCase_)
lowercase_ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def _UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Any=0):
"""simple docstring"""
lowercase_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowerCAmelCase_)).to(lowerCAmelCase_)
lowercase_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1)).to(
lowerCAmelCase_)
# create init_image
lowercase_ = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(lowerCAmelCase_)).to(lowerCAmelCase_)
lowercase_ = image.cpu().permute(0 , 2 , 3 , 1)[0]
lowercase_ = Image.fromarray(np.uinta(lowerCAmelCase_)).convert("""RGB""").resize((2_5_6, 2_5_6))
# create hint
lowercase_ = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(lowerCAmelCase_)).to(lowerCAmelCase_)
if str(lowerCAmelCase_).startswith("""mps"""):
lowercase_ = torch.manual_seed(lowerCAmelCase_)
else:
lowercase_ = torch.Generator(device=lowerCAmelCase_).manual_seed(lowerCAmelCase_)
lowercase_ = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 6_4,
"""width""": 6_4,
"""num_inference_steps""": 1_0,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
lowercase_ = """cpu"""
lowercase_ = self.get_dummy_components()
lowercase_ = self.pipeline_class(**lowerCAmelCase_)
lowercase_ = pipe.to(lowerCAmelCase_)
pipe.set_progress_bar_config(disable=lowerCAmelCase_)
lowercase_ = pipe(**self.get_dummy_inputs(lowerCAmelCase_))
lowercase_ = output.images
lowercase_ = pipe(
**self.get_dummy_inputs(lowerCAmelCase_) , return_dict=lowerCAmelCase_ , )[0]
lowercase_ = image[0, -3:, -3:, -1]
lowercase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
lowercase_ = np.array(
[0.54_985_034, 0.55_509_365, 0.52_561_504, 0.5_570_494, 0.5_593_818, 0.5_263_979, 0.50_285_643, 0.5_069_846, 0.51_196_736])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy""")
lowercase_ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""")
lowercase_ = init_image.resize((5_1_2, 5_1_2))
lowercase_ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""")
lowercase_ = torch.from_numpy(np.array(lowerCAmelCase_)).float() / 255.0
lowercase_ = hint.permute(2 , 0 , 1).unsqueeze(0)
lowercase_ = """A robot, 4k photo"""
lowercase_ = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa)
pipe_prior.to(lowerCAmelCase_)
lowercase_ = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""" , torch_dtype=torch.floataa)
lowercase_ = pipeline.to(lowerCAmelCase_)
pipeline.set_progress_bar_config(disable=lowerCAmelCase_)
lowercase_ = torch.Generator(device="""cpu""").manual_seed(0)
lowercase_ , lowercase_ = pipe_prior(
lowerCAmelCase_ , image=lowerCAmelCase_ , strength=0.85 , generator=lowerCAmelCase_ , negative_prompt="""""" , ).to_tuple()
lowercase_ = pipeline(
image=lowerCAmelCase_ , image_embeds=lowerCAmelCase_ , negative_image_embeds=lowerCAmelCase_ , hint=lowerCAmelCase_ , generator=lowerCAmelCase_ , num_inference_steps=1_0_0 , height=5_1_2 , width=5_1_2 , strength=0.5 , output_type="""np""" , )
lowercase_ = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(lowerCAmelCase_ , lowerCAmelCase_)
| 313
|
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCAmelCase : Optional[Any] = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class SCREAMING_SNAKE_CASE__ :
lowercase__ = PegasusConfig
lowercase__ = {}
lowercase__ = "gelu"
def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ):
    """simple docstring"""
    self.parent = parent
    self.batch_size = batch_size
    self.seq_length = seq_length
    self.is_training = is_training
    self.use_labels = use_labels
    self.vocab_size = vocab_size
    self.hidden_size = hidden_size
    self.num_hidden_layers = num_hidden_layers
    self.num_attention_heads = num_attention_heads
    self.intermediate_size = intermediate_size
    self.hidden_dropout_prob = hidden_dropout_prob
    self.attention_probs_dropout_prob = attention_probs_dropout_prob
    self.max_position_embeddings = max_position_embeddings
    self.eos_token_id = eos_token_id
    self.pad_token_id = pad_token_id
    self.bos_token_id = bos_token_id
def prepare_config_and_inputs_for_common(self):
    """simple docstring"""
    input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
    eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
    input_ids = np.concatenate([input_ids, eos_tensor], axis=1)
    decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
    config = self.config_cls(
        vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
    inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
    return config, inputs_dict
def check_use_cache_forward(self, model_class_name, config, inputs_dict):
    """simple docstring"""
    max_decoder_length = 20
    model = model_class_name(config)
    encoder_outputs = model.encode(inputs_dict["input_ids"])
    decoder_input_ids, decoder_attention_mask = (
        inputs_dict["decoder_input_ids"],
        inputs_dict["decoder_attention_mask"],
    )
    past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
    decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")
    decoder_position_ids = jnp.broadcast_to(
        jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
    outputs_cache = model.decode(
        decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, )
    decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
    outputs_cache_next = model.decode(
        decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids, )
    outputs = model.decode(decoder_input_ids, encoder_outputs)
    diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
    self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
    """simple docstring"""
    max_decoder_length = 20
    model = model_class_name(config)
    encoder_outputs = model.encode(inputs_dict["input_ids"])
    decoder_input_ids, decoder_attention_mask = (
        inputs_dict["decoder_input_ids"],
        inputs_dict["decoder_attention_mask"],
    )
    decoder_attention_mask_cache = jnp.concatenate(
        [
            decoder_attention_mask,
            jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
        ], axis=-1, )
    past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
    decoder_position_ids = jnp.broadcast_to(
        jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
    outputs_cache = model.decode(
        decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, )
    decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
    outputs_cache_next = model.decode(
        decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids, )
    outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)
    diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
    self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, ):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ], axis=-1, )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
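# Illustrative behavior (hypothetical values): for decoder_input_ids [[0, 5, 7, 1]]
# with config.pad_token_id == 1, the derived decoder_attention_mask is [[1, 1, 1, 0]]:
# position 0 is always attended, later pad positions are masked out.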
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
def setUp(self):
    """simple docstring"""
    self.model_tester = FlaxPegasusModelTester(self)
    self.config_tester = ConfigTester(self, config_class=PegasusConfig)

def test_config(self):
    """simple docstring"""
    self.config_tester.run_common_tests()
def test_use_cache_forward(self):
    """simple docstring"""
    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    for model_class in self.all_model_classes:
        self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)
def test_use_cache_forward_with_attn_mask(self):
    """simple docstring"""
    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    for model_class in self.all_model_classes:
        self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
def test_encode(self):
    """simple docstring"""
    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    for model_class in self.all_model_classes:
        with self.subTest(model_class.__name__):
            prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)

            @jax.jit
            def encode_jitted(input_ids, attention_mask=None, **kwargs):
                return model.encode(input_ids=input_ids, attention_mask=attention_mask)

            with self.subTest("JIT Enabled"):
                jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
            with self.subTest("JIT Disabled"):
                with jax.disable_jit():
                    outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
            self.assertEqual(len(jitted_outputs), len(outputs))
            for jitted_output, output in zip(jitted_outputs, outputs):
                self.assertEqual(jitted_output.shape, output.shape)
def test_decode(self):
    """simple docstring"""
    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    for model_class in self.all_model_classes:
        with self.subTest(model_class.__name__):
            model = model_class(config)
            encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])
            prepared_inputs_dict = {
                "decoder_input_ids": inputs_dict["decoder_input_ids"],
                "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                "encoder_outputs": encoder_outputs,
            }

            @jax.jit
            def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                return model.decode(
                    decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, )

            with self.subTest("JIT Enabled"):
                jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
            with self.subTest("JIT Disabled"):
                with jax.disable_jit():
                    outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
            self.assertEqual(len(jitted_outputs), len(outputs))
            for jitted_output, output in zip(jitted_outputs, outputs):
                self.assertEqual(jitted_output.shape, output.shape)
@slow
def test_model_from_pretrained(self):
    """simple docstring"""
    for model_class_name in self.all_model_classes:
        model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
        input_ids = np.ones((1, 1))
        outputs = model(input_ids)
        self.assertIsNotNone(outputs)
@slow
def test_pegasus_xsum_summary(self):
    """simple docstring"""
    model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
    tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
    src_text = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
    tgt_text = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
    inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
    translated_tokens = model.generate(**inputs, num_beams=2).sequences
    decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
    assert tgt_text == decoded
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class FlaxUNetaDConditionOutput(BaseOutput):
    sample: jnp.ndarray
@flax_register_to_config
class FlaxUNetaDConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False
def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
    """simple docstring"""
    sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
    sample = jnp.zeros(sample_shape, dtype=jnp.float32)
    timesteps = jnp.ones((1,), dtype=jnp.int32)
    encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
    params_rng, dropout_rng = jax.random.split(rng)
    rngs = {"params": params_rng, "dropout": dropout_rng}
    return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]
def setup(self):
    """simple docstring"""
    block_out_channels = self.block_out_channels
    time_embed_dim = block_out_channels[0] * 4
    if self.num_attention_heads is not None:
        raise ValueError(
            "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.")
    # If `num_attention_heads` is not defined (which is the case for most models)
    # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
    # The reason for this behavior is to correct for incorrectly named variables that were introduced
    # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
    # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
    # which is why we correct for the naming here.
    num_attention_heads = self.num_attention_heads or self.attention_head_dim

    # input
    self.conv_in = nn.Conv(
        block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )

    # time
    self.time_proj = FlaxTimesteps(
        block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift)
    self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

    only_cross_attention = self.only_cross_attention
    if isinstance(only_cross_attention, bool):
        only_cross_attention = (only_cross_attention,) * len(self.down_block_types)
    if isinstance(num_attention_heads, int):
        num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

    # down
    down_blocks = []
    output_channel = block_out_channels[0]
    for i, down_block_type in enumerate(self.down_block_types):
        input_channel = output_channel
        output_channel = block_out_channels[i]
        is_final_block = i == len(block_out_channels) - 1
        if down_block_type == "CrossAttnDownBlock2D":
            down_block = FlaxCrossAttnDownBlockaD(
                in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
        else:
            down_block = FlaxDownBlockaD(
                in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype, )
        down_blocks.append(down_block)
    self.down_blocks = down_blocks

    # mid
    self.mid_block = FlaxUNetMidBlockaDCrossAttn(
        in_channels=block_out_channels[-1], dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )

    # up
    up_blocks = []
    reversed_block_out_channels = list(reversed(block_out_channels))
    reversed_num_attention_heads = list(reversed(num_attention_heads))
    only_cross_attention = list(reversed(only_cross_attention))
    output_channel = reversed_block_out_channels[0]
    for i, up_block_type in enumerate(self.up_block_types):
        prev_output_channel = output_channel
        output_channel = reversed_block_out_channels[i]
        input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
        is_final_block = i == len(block_out_channels) - 1
        if up_block_type == "CrossAttnUpBlock2D":
            up_block = FlaxCrossAttnUpBlockaD(
                in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, num_attention_heads=reversed_num_attention_heads[i], add_upsample=not is_final_block, dropout=self.dropout, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
        else:
            up_block = FlaxUpBlockaD(
                in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, add_upsample=not is_final_block, dropout=self.dropout, dtype=self.dtype, )
        up_blocks.append(up_block)
    self.up_blocks = up_blocks

    # out
    self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5)
    self.conv_out = nn.Conv(
        self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
def __call__(self, sample, timesteps, encoder_hidden_states, down_block_additional_residuals=None, mid_block_additional_residual=None, return_dict: bool = True, train: bool = False, ):
    """simple docstring"""
    # 1. time
    if not isinstance(timesteps, jnp.ndarray):
        timesteps = jnp.array([timesteps], dtype=jnp.int32)
    elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
        timesteps = timesteps.astype(dtype=jnp.float32)
        timesteps = jnp.expand_dims(timesteps, 0)
    t_emb = self.time_proj(timesteps)
    t_emb = self.time_embedding(t_emb)

    # 2. pre-process
    sample = jnp.transpose(sample, (0, 2, 3, 1))
    sample = self.conv_in(sample)

    # 3. down
    down_block_res_samples = (sample,)
    for down_block in self.down_blocks:
        if isinstance(down_block, FlaxCrossAttnDownBlockaD):
            sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
        else:
            sample, res_samples = down_block(sample, t_emb, deterministic=not train)
        down_block_res_samples += res_samples
    if down_block_additional_residuals is not None:
        new_down_block_res_samples = ()
        for down_block_res_sample, down_block_additional_residual in zip(
            down_block_res_samples, down_block_additional_residuals):
            down_block_res_sample += down_block_additional_residual
            new_down_block_res_samples += (down_block_res_sample,)
        down_block_res_samples = new_down_block_res_samples

    # 4. mid
    sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
    if mid_block_additional_residual is not None:
        sample += mid_block_additional_residual

    # 5. up
    for up_block in self.up_blocks:
        res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
        down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
        if isinstance(up_block, FlaxCrossAttnUpBlockaD):
            sample = up_block(
                sample, temb=t_emb, encoder_hidden_states=encoder_hidden_states, res_hidden_states_tuple=res_samples, deterministic=not train, )
        else:
            sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)

    # 6. post-process
    sample = self.conv_norm_out(sample)
    sample = nn.silu(sample)
    sample = self.conv_out(sample)
    sample = jnp.transpose(sample, (0, 3, 1, 2))
    if not return_dict:
        return (sample,)
    return FlaxUNetaDConditionOutput(sample=sample)
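# Minimal usage sketch (shapes, the PRNG seed, and the context width below are
# illustrative assumptions, not taken from this file):
#     unet = FlaxUNetaDConditionModel(sample_size=32)
#     params = unet.init_weights(jax.random.PRNGKey(0))
#     sample = jnp.zeros((1, 4, 32, 32))
#     timesteps = jnp.ones((1,), dtype=jnp.int32)
#     context = jnp.zeros((1, 77, 1280))
#     out = unet.apply({"params": params}, sample, timesteps, context).sample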
"""simple docstring"""
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class SCREAMING_SNAKE_CASE__ :
lowercase__ = None
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = None
lowercase__ = None
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = True
lowercase__ = None
lowercase__ = 1
lowercase__ = None
lowercase__ = False
lowercase__ = None
lowercase__ = None
def _UpperCAmelCase ( self : int):
"""simple docstring"""
# Deep-copy each field so the returned config is independent of this one.
return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that, temporarily, `from diffusers.pipelines import DiffusionPipeline` keeps working
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"stable diffusion controlnet",
"0.22.0",
"Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.",
standard_warn=False,
stacklevel=3,
)
"""simple docstring"""
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]
def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs, ):
    """simple docstring"""
    super().__init__(
        vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs, )
    pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
    if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
        pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
        pre_tok_state["add_prefix_space"] = add_prefix_space
        self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
    self.add_prefix_space = add_prefix_space
    # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
    tokenizer_component = "post_processor"
    tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
    if tokenizer_component_instance:
        state = json.loads(tokenizer_component_instance.__getstate__())
        # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
        if "sep" in state:
            state["sep"] = tuple(state["sep"])
        if "cls" in state:
            state["cls"] = tuple(state["cls"])
        changes_to_apply = False
        if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            state["add_prefix_space"] = add_prefix_space
            changes_to_apply = True
        if state.get("trim_offsets", trim_offsets) != trim_offsets:
            state["trim_offsets"] = trim_offsets
            changes_to_apply = True
        if changes_to_apply:
            component_class = getattr(processors, state.pop("type"))
            new_value = component_class(**state)
            setattr(self.backend_tokenizer, tokenizer_component, new_value)
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def mask_token(self) -> str:
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""")
return None
return str(self._mask_token)
@mask_token.setter
def mask_token(self, value):
    """simple docstring"""
    value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
    self._mask_token = value
def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
    """simple docstring"""
    is_split_into_words = kwargs.get("is_split_into_words", False)
    if is_split_into_words and not self.add_prefix_space:
        raise ValueError(
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs.")
    return super()._batch_encode_plus(*args, **kwargs)
def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
    """simple docstring"""
    is_split_into_words = kwargs.get("is_split_into_words", False)
    if is_split_into_words and not self.add_prefix_space:
        raise ValueError(
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs.")
    return super()._encode_plus(*args, **kwargs)
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
    """simple docstring"""
    files = self._tokenizer.model.save(save_directory, name=filename_prefix)
    return tuple(files)
def build_inputs_with_special_tokens(self, token_ids_a, token_ids_b=None):
    """simple docstring"""
    output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
    if token_ids_b is None:
        return output
    return output + [self.eos_token_id] + token_ids_b + [self.eos_token_id]
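    # Resulting formats (shown symbolically): single sequence -> `<s> A </s>`;
    # pair -> `<s> A </s></s> B </s>`, the BART-style convention LED inherits.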
def create_token_type_ids_from_sequences(self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None):
    """simple docstring"""
    sep = [self.sep_token_id]
    cls = [self.cls_token_id]
    if token_ids_b is None:
        return len(cls + token_ids_a + sep) * [0]
    return len(cls + token_ids_a + sep + sep + token_ids_b + sep) * [0]
def _pad(self, encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, ):
    """simple docstring"""
    encoded_inputs = super()._pad(
        encoded_inputs=encoded_inputs, max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, )
    # Load from model defaults
    if return_attention_mask is None:
        return_attention_mask = "attention_mask" in self.model_input_names
    if return_attention_mask and "global_attention_mask" in encoded_inputs:
        required_input = encoded_inputs[self.model_input_names[0]]
        # `global_attention_mask` needs to have the same length as other (sequential) inputs.
        needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)
        if needs_to_be_padded:
            difference = len(required_input) - len(encoded_inputs["global_attention_mask"])
            if self.padding_side == "right":
                # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                encoded_inputs["global_attention_mask"] = (
                    encoded_inputs["global_attention_mask"] + [-1] * difference
                )
            elif self.padding_side == "left":
                encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                    "global_attention_mask"
                ]
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))
    return encoded_inputs
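    # Example (hypothetical values): with padding_side == "right", a
    # global_attention_mask of [1, 0, 0] padded to length 5 becomes [1, 0, 0, -1, -1];
    # 0 already means "local attention", so -1 marks the padded positions instead.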
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    '''simple docstring'''
    # count[t] = number of distinct square laminae using exactly t tiles, where
    # t = outer_width**2 - hole_width**2 and the hole shares the outer square's
    # parity (so a border of uniform width remains on all sides).
    count: defaultdict = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            # Smallest hole that keeps the tile count within t_limit.
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit)
if __name__ == "__main__":
print(F"{solution() = }")
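# Sanity example: 32 tiles form exactly two laminae (a 9x9 square with a 7x7 hole,
# and a 6x6 square with a 2x2 hole), so count[32] == 2 inside solution().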
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    '''simple docstring'''
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name)
IMPORT_ERROR_MESSAGE = "\ntransformers can only be used from the command line to convert TensorFlow models to PyTorch. In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n"
class ConvertCommand(BaseTransformersCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
"""simple docstring"""
    train_parser = parser.add_parser(
        "convert", help="CLI tool to convert model checkpoints from the original authors' TensorFlow format to Transformers PyTorch checkpoints.", )
train_parser.add_argument("""--model_type""" , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help="""Model's type.""")
train_parser.add_argument(
"""--tf_checkpoint""" , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help="""TensorFlow checkpoint path or folder.""")
train_parser.add_argument(
"""--pytorch_dump_output""" , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help="""Path to the PyTorch saved model output.""")
train_parser.add_argument("""--config""" , type=lowerCAmelCase_ , default="""""" , help="""Configuration file path or folder.""")
train_parser.add_argument(
"""--finetuning_task_name""" , type=lowerCAmelCase_ , default=lowerCAmelCase_ , help="""Optional fine-tuning task name if the TF model was a finetuned model.""" , )
    train_parser.set_defaults(func=convert_command_factory)
def __init__(self, model_type: str, tf_checkpoint: str, pytorch_dump_output: str, config: str, finetuning_task_name: str, *args, ):
    """simple docstring"""
    self._logger = logging.get_logger("transformers-cli/converting")
    self._logger.info(f"Loading model {model_type}")
    self._model_type = model_type
    self._tf_checkpoint = tf_checkpoint
    self._pytorch_dump_output = pytorch_dump_output
    self._config = config
    self._finetuning_task_name = finetuning_task_name
def run(self):
"""simple docstring"""
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
            raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
            raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
            raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
            raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
            raise ImportError(IMPORT_ERROR_MESSAGE)
if "ckpt" in self._tf_checkpoint.lower():
lowercase_ = self._tf_checkpoint
lowercase_ = """"""
else:
lowercase_ = self._tf_checkpoint
lowercase_ = """"""
convert_transfo_xl_checkpoint_to_pytorch(
lowerCAmelCase_ , self._config , self._pytorch_dump_output , lowerCAmelCase_)
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
            raise ImportError(IMPORT_ERROR_MESSAGE)
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
            raise ImportError(IMPORT_ERROR_MESSAGE)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name)
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output)
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output)
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
else:
raise ValueError(
"""--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]""")
"""simple docstring"""
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, embedding_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
    """simple docstring"""
    self.parent = parent
    self.batch_size = batch_size
    self.seq_length = seq_length
    self.is_training = is_training
    self.use_input_mask = use_input_mask
    self.use_token_type_ids = use_token_type_ids
    self.use_labels = use_labels
    self.vocab_size = vocab_size
    self.hidden_size = hidden_size
    self.embedding_size = embedding_size
    self.num_hidden_layers = num_hidden_layers
    self.num_attention_heads = num_attention_heads
    self.intermediate_size = intermediate_size
    self.hidden_act = hidden_act
    self.hidden_dropout_prob = hidden_dropout_prob
    self.attention_probs_dropout_prob = attention_probs_dropout_prob
    self.max_position_embeddings = max_position_embeddings
    self.type_vocab_size = type_vocab_size
    self.type_sequence_label_size = type_sequence_label_size
    self.initializer_range = initializer_range
    self.num_labels = num_labels
    self.num_choices = num_choices
    self.scope = scope
def prepare_config_and_inputs(self):
    """simple docstring"""
    input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
    input_mask = None
    if self.use_input_mask:
        input_mask = random_attention_mask([self.batch_size, self.seq_length])
    token_type_ids = None
    if self.use_token_type_ids:
        token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
    sequence_labels = None
    token_labels = None
    choice_labels = None
    if self.use_labels:
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        choice_labels = ids_tensor([self.batch_size], self.num_choices)
    config = self.get_config()
    return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config(self):
    """simple docstring"""
    return MegatronBertConfig(
        vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
def create_and_check_megatron_bert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
    """simple docstring"""
    model = MegatronBertModel(config=config)
    model.to(torch_device)
    model.eval()
    result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
    result = model(input_ids, token_type_ids=token_type_ids)
    result = model(input_ids)
    self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_megatron_bert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
    """simple docstring"""
    model = MegatronBertForMaskedLM(config=config)
    model.to(torch_device)
    model.eval()
    result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_megatron_bert_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
    """simple docstring"""
    model = MegatronBertForCausalLM(config=config)
    model.to(torch_device)
    model.eval()
    result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_megatron_bert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
    """simple docstring"""
    model = MegatronBertForNextSentencePrediction(config=config)
    model.to(torch_device)
    model.eval()
    result = model(
        input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, )
    self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
def create_and_check_megatron_bert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
    """simple docstring"""
    model = MegatronBertForPreTraining(config=config)
    model.to(torch_device)
    model.eval()
    result = model(
        input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels, )
    self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
def create_and_check_megatron_bert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
    """simple docstring"""
    model = MegatronBertForQuestionAnswering(config=config)
    model.to(torch_device)
    model.eval()
    result = model(
        input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
    self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
    self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def create_and_check_megatron_bert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
    """simple docstring"""
    config.num_labels = self.num_labels
    model = MegatronBertForSequenceClassification(config)
    model.to(torch_device)
    model.eval()
    result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_megatron_bert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
    """simple docstring"""
    config.num_labels = self.num_labels
    model = MegatronBertForTokenClassification(config=config)
    model.to(torch_device)
    model.eval()
    result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_megatron_bert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
    """simple docstring"""
    config.num_choices = self.num_choices
    model = MegatronBertForMultipleChoice(config=config)
    model.to(torch_device)
    model.eval()
    multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
    multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
    multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
    result = model(
        multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def prepare_config_and_inputs_for_common(self):
    """simple docstring"""
    config_and_inputs = self.prepare_config_and_inputs()
    (
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ) = config_and_inputs
    inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
    return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": MegatronBertModel,
"fill-mask": MegatronBertForMaskedLM,
"question-answering": MegatronBertForQuestionAnswering,
"text-classification": MegatronBertForSequenceClassification,
"text-generation": MegatronBertForCausalLM,
"token-classification": MegatronBertForTokenClassification,
"zero-shot": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fp32 = True
    # test_resize_embeddings = False
    test_head_masking = False
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
    """simple docstring"""
    inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
    if return_labels:
        if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
            inputs_dict["labels"] = torch.zeros(
                (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device)
            inputs_dict["next_sentence_label"] = torch.zeros(
                self.model_tester.batch_size, dtype=torch.long, device=torch_device)
    return inputs_dict
def setUp(self):
    """simple docstring"""
    self.model_tester = MegatronBertModelTester(self)
    self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)
def test_config(self):
    """simple docstring"""
    self.config_tester.run_common_tests()
def test_megatron_bert_model(self):
    """simple docstring"""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

def test_for_masked_lm(self):
    """simple docstring"""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

def test_for_multiple_choice(self):
    """simple docstring"""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

def test_for_next_sequence_prediction(self):
    """simple docstring"""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

def test_for_pretraining(self):
    """simple docstring"""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

def test_for_question_answering(self):
    """simple docstring"""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

def test_for_sequence_classification(self):
    """simple docstring"""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

def test_for_token_classification(self):
    """simple docstring"""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    '''simple docstring'''
    return torch.tensor(
        tok_lst, dtype=torch.long, device=torch_device, )
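# e.g. _long_tensor([[101, 102]]) -> a (1, 2) torch.long tensor on `torch_device`.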
TOLERANCE = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
@unittest.skip("""Model is not available.""")
def test_inference_no_head(self):
    """simple docstring"""
    directory = "nvidia/megatron-bert-uncased-345m"
    if "MYDIR" in os.environ:
        directory = os.path.join(os.environ["MYDIR"], directory)
    model = MegatronBertModel.from_pretrained(directory)
    model.to(torch_device)
    model.half()
    input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
    with torch.no_grad():
        output = model(input_ids)[0]
    expected_shape = torch.Size((1, 9, 1024))
    self.assertEqual(output.shape, expected_shape)
    expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
    for ii in range(3):
        for jj in range(3):
            a = output[0, ii, jj]
            b = expected[3 * ii + jj]
            msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
            self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class SCREAMING_SNAKE_CASE__(BaseImageProcessor):
    model_input_names = ["pixel_values"]
def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs, ):
    """simple docstring"""
    super().__init__(**kwargs)
    size = size if size is not None else {"shortest_edge": 224}
    size = get_size_dict(size, default_to_square=False)
    crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
    crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
    self.do_resize = do_resize
    self.size = size
    self.resample = resample
    self.do_center_crop = do_center_crop
    self.crop_size = crop_size
    self.do_rescale = do_rescale
    self.rescale_factor = rescale_factor
    self.do_normalize = do_normalize
    self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
    self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
    self.do_convert_rgb = do_convert_rgb
def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ):
    """simple docstring"""
    size = get_size_dict(size, default_to_square=False)
    if "shortest_edge" not in size:
        raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
    output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
    return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ):
    """simple docstring"""
    size = get_size_dict(size)
    if "height" not in size or "width" not in size:
        raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
    return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ):
    """simple docstring"""
    return rescale(image, scale=scale, data_format=data_format, **kwargs)
def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ):
    """simple docstring"""
    return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess( self , images , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , do_convert_rgb = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size , param_name="""size""" , default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="""crop_size""" , default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""")
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""")
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""")
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""")
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""")
# PIL RGBA images are converted to RGB
if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std) for image in images]
        images = [to_channel_dimension_format(image , data_format) for image in images]
        data = {"""pixel_values""": images}
        return BatchFeature(data=data , tensor_type=return_tensors)
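# A minimal usage sketch (illustrative; the concrete processor class name and the
# image file are assumptions, not taken from this module):
#     processor = SomeClipLikeImageProcessor()
#     batch = processor.preprocess(images=[Image.open("photo.png")], return_tensors="pt")
#     batch["pixel_values"]  # float tensor of shape (batch, 3, crop_height, crop_width)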
"""simple docstring"""
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table ) -> tuple[np.ndarray, np.ndarray]:
    '''simple docstring'''
    rows , columns = np.shape(table )
    if rows != columns:
        msg = (
            """'table' has to be of square shaped array but got a """
            F'''{rows}x{columns} array:\n{table}'''
        )
        raise ValueError(msg )
    lower = np.zeros((rows, columns) )
    upper = np.zeros((rows, columns) )
    for i in range(columns ):
        for j in range(i ):
            total = sum(lower[i][k] * upper[k][j] for k in range(j ) )
            if upper[j][j] == 0:
                raise ArithmeticError("""No LU decomposition exists""" )
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i , columns ):
            total = sum(lower[i][k] * upper[k][j] for k in range(i ) )
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
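    # Illustrative check (a sketch): the Doolittle factors of a small square
    # matrix should reconstruct it, i.e. table == lower @ upper.
    example = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
    lower, upper = lower_upper_decomposition(example)
    assert np.allclose(lower @ upper, example)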
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
        """simple docstring"""
        super().setUp()
        tokenizer = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname , legacy_format=False)
        self.tokenizer = tokenizer
    def test_convert_token_and_id( self ):
        """simple docstring"""
        token = """<pad>"""
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token) , token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id) , token)
    def test_get_vocab( self ):
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0] , """<s>""")
        self.assertEqual(vocab_keys[1] , """<pad>""")
        self.assertEqual(vocab_keys[-1] , """<mask>""")
        self.assertEqual(len(vocab_keys) , 1_0_1_1_2_2)
    def test_vocab_size( self ):
        """simple docstring"""
        self.assertEqual(self.get_tokenizer().vocab_size , 1_0_1_1_2_2)
    @require_torch
    def test_prepare_batch( self ):
        """simple docstring"""
        src_text = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        expected_src_tokens = [0, 5_7, 3_0_1_8, 7_0_3_0_7, 9_1, 2]
        batch = self.tokenizer(
            src_text , max_length=len(expected_src_tokens) , padding=True , truncation=True , return_tensors="""pt""")
        self.assertIsInstance(batch , BatchEncoding)
        self.assertEqual((2, 6) , batch.input_ids.shape)
        self.assertEqual((2, 6) , batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens , result)
    def test_rust_and_python_full_tokenizers( self ):
        """simple docstring"""
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = """I was born in 92000, and this is falsé."""
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens , rust_tokens)
        ids = tokenizer.encode(sequence , add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False)
        self.assertListEqual(ids , rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids , rust_ids)
@slow
    def test_tokenizer_integration( self ):
        """simple docstring"""
lowercase_ = {"""input_ids""": [[0, 4_9_0, 1_4_3_2_8, 4_5_0_7, 3_5_4, 4_7, 4_3_6_6_9, 9_5, 2_5, 7_8_1_1_7, 2_0_2_1_5, 1_9_7_7_9, 1_9_0, 2_2, 4_0_0, 4, 3_5_3_4_3, 8_0_3_1_0, 6_0_3, 8_6, 2_4_9_3_7, 1_0_5, 3_3_4_3_8, 9_4_7_6_2, 1_9_6, 3_9_6_4_2, 7, 1_5, 1_5_9_3_3, 1_7_3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0_5_3_4, 8_7, 2_5, 6_6, 3_3_5_8, 1_9_6, 5_5_2_8_9, 8, 8_2_9_6_1, 8_1, 2_2_0_4, 7_5_2_0_3, 7, 1_5, 7_6_3, 1_2_9_5_6, 2_1_6, 1_7_8, 1_4_3_2_8, 9_5_9_5, 1_3_7_7, 6_9_6_9_3, 7, 4_4_8, 7_1_0_2_1, 1_9_6, 1_8_1_0_6, 1_4_3_7, 1_3_9_7_4, 1_0_8, 9_0_8_3, 4, 4_9_3_1_5, 7, 3_9, 8_6, 1_3_2_6, 2_7_9_3, 4_6_3_3_3, 4, 4_4_8, 1_9_6, 7_4_5_8_8, 7, 4_9_3_1_5, 7, 3_9, 2_1, 8_2_2, 3_8_4_7_0, 7_4, 2_1, 6_6_7_2_3, 6_2_4_8_0, 8, 2_2_0_5_0, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
lowercase_ = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase_ , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=lowerCAmelCase_ , )
"""simple docstring"""
import os
from collections.abc import Iterator
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase = "." ) -> Iterator[str]:
'''simple docstring'''
for dir_path, dir_names, filenames in os.walk(__lowerCAmelCase ):
lowercase_ = [d for d in dir_names if d != """scripts""" and d[0] not in """._"""]
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(__lowerCAmelCase )[1] in (".py", ".ipynb"):
yield os.path.join(__lowerCAmelCase , __lowerCAmelCase ).lstrip("""./""" )
def md_prefix(i ) -> str:
    '''simple docstring'''
    return F'''{i * "  "}*''' if i else "\n##"
def print_path(old_path , new_path ) -> str:
    '''simple docstring'''
    old_parts = old_path.split(os.sep )
    for i, new_part in enumerate(new_path.split(os.sep ) ):
        if (i + 1 > len(old_parts ) or old_parts[i] != new_part) and new_part:
            print(F'''{md_prefix(i )} {new_part.replace("_" , " " ).title()}''' )
    return new_path
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase = "." ) -> None:
'''simple docstring'''
lowercase_ = """"""
for filepath in sorted(good_file_paths(__lowerCAmelCase ) ):
lowercase_ , lowercase_ = os.path.split(__lowerCAmelCase )
if filepath != old_path:
lowercase_ = print_path(__lowerCAmelCase , __lowerCAmelCase )
lowercase_ = (filepath.count(os.sep ) + 1) if filepath else 0
lowercase_ = F'''{filepath}/{filename}'''.replace(""" """ , """%20""" )
lowercase_ = os.path.splitext(filename.replace("""_""" , """ """ ).title() )[0]
print(F'''{md_prefix(__lowerCAmelCase )} [{filename}]({url})''' )
if __name__ == "__main__":
print_directory_md(".")
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def remove_ignore_keys_(state_dict ) -> None:
    '''simple docstring'''
    ignore_keys = [
        """encoder.version""",
        """decoder.version""",
        """model.encoder.version""",
        """model.decoder.version""",
        """decoder.output_projection.weight""",
        """_float_tensor""",
        """encoder.embed_positions._float_tensor""",
        """decoder.embed_positions._float_tensor""",
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb(emb ) -> nn.Linear:
    '''simple docstring'''
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
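# Note: the helper above ties the LM head to the token-embedding matrix: a
# bias-free nn.Linear whose weight is the embedding table, so output logits are
# computed as hidden_states @ embedding_table.T.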
def convert_fairseq_mamaaa_checkpoint_from_disk(checkpoint_path ):
    '''simple docstring'''
    mam_aaa = torch.load(checkpoint_path , map_location="""cpu""" )
    args = mam_aaa["""args"""] or mam_aaa["""cfg"""]["""model"""]
    state_dict = mam_aaa["""model"""]
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict["""encoder.embed_tokens.weight"""].shape[0]
    config = MaMaaaConfig(
        vocab_size=vocab_size , max_position_embeddings=10_24 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""relu""" , )
    state_dict["""shared.weight"""] = state_dict["""decoder.embed_tokens.weight"""]
    model = MaMaaaForConditionalGeneration(config )
    model.model.load_state_dict(state_dict , strict=False )
    model.lm_head = make_linear_from_emb(model.model.shared )
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
"""simple docstring"""
def bead_sort(sequence ) -> list:
    '''simple docstring'''
    if any(not isinstance(x , int ) or x < 0 for x in sequence ):
        raise TypeError("""Sequence must be list of non-negative integers""" )
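    # Each pass lets "beads" fall one step: whenever a rod holds more beads
    # than the rod after it, the surplus drops down one position, so after
    # len(sequence) passes the list is in non-decreasing order.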
    for _ in range(len(sequence ) ):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence , sequence[1:] ) ):
if rod_upper > rod_lower:
sequence[i] -= rod_upper - rod_lower
sequence[i + 1] += rod_upper - rod_lower
return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
"""simple docstring"""
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument("""-f""" )
    args = parser.parse_args()
    return args.f
class DeeBertTests(TestCasePlus ):
    def setup( self ):
        """simple docstring"""
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
    def run_and_check( self , args ):
        """simple docstring"""
        n_gpu = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , """run_glue_deebert.py""")
            with patch.object(sys , """argv""" , args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value , 0.666)
@slow
@require_torch_non_multi_gpu
    def test_glue_deebert_train( self ):
        """simple docstring"""
        train_args = """
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
""".split()
        self.run_and_check(train_args)
        eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
        self.run_and_check(eval_args)
        entropy_eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
        self.run_and_check(entropy_eval_args)
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCAmelCase : Tuple = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(config , input_ids , decoder_input_ids=None , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id , 1 , 0 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class FlaxBlenderbotModelTester:
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_labels=False , vocab_size=9_9 , hidden_size=1_6 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=4 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=3_2 , eos_token_id=2 , bos_token_id=1 , pad_token_id=0 , initializer_range=0.02 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size) , 3 , self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.int64)) , -1)
        decoder_input_ids = shift_tokens_right(input_ids , 1 , 2)
        config = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=False , )
        inputs_dict = prepare_blenderbot_inputs_dict(config , input_ids , decoder_input_ids)
        return config, inputs_dict
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config , inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward( self , model_class_name , config , inputs_dict ):
        """simple docstring"""
        max_decoder_length = 2_0
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["""input_ids"""])
        decoder_input_ids , decoder_attention_mask = (
            inputs_dict["""decoder_input_ids"""],
            inputs_dict["""decoder_attention_mask"""],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs)
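        # The cache is pre-allocated for max_decoder_length steps; all tokens
        # except the last are decoded in one call, the final token is decoded
        # against the cached key/values, and the result must match a full
        # cache-free forward pass to within ~1e-3.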
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""")
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=outputs_cache.past_key_values , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''')
    def check_use_cache_forward_with_attn_mask( self , model_class_name , config , inputs_dict ):
        """simple docstring"""
        max_decoder_length = 2_0
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["""input_ids"""])
        decoder_input_ids , decoder_attention_mask = (
            inputs_dict["""decoder_input_ids"""],
            inputs_dict["""decoder_attention_mask"""],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ] , axis=-1 , )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask_cache , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=decoder_attention_mask_cache , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs , decoder_attention_mask=decoder_attention_mask)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''')
@require_flax
class BlenderbotHeadTests(unittest.TestCase ):
    vocab_size = 99
    def _get_config_and_data( self ):
        """simple docstring"""
        input_ids = np.array(
[
[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
[6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
[5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
[7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
[8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
[5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding
[6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
[5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
[4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
[2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
[2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
[4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
[7_0, 7_0, 5_0, 9, 2_8, 0, 2],
            ] , dtype=np.int64 , )
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
        return config, input_ids, batch_size
    def test_lm_forward( self ):
        """simple docstring"""
        config , input_ids , batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["""logits"""].shape , expected_shape)
    def test_lm_uneven_forward( self ):
        """simple docstring"""
        config = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.int64)
        summary = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.int64)
        outputs = lm_model(input_ids=context , decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["""logits"""].shape , expected_shape)
    def test_shift_tokens_right( self ):
        """simple docstring"""
        input_ids = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.int64)
        shifted = shift_tokens_right(input_ids , 1 , 2)
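        # shift_tokens_right puts the decoder start token (id 2) in column 0 and
        # shifts everything right by one, dropping the last column; a row that
        # ended in padding therefore loses exactly one pad token, which the
        # counts below verify.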
        n_pad_before = np.equal(input_ids , 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted , 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape , input_ids.shape)
        self.assertEqual(n_pad_after , n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0] , 2).all())
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin , unittest.TestCase , FlaxGenerationTesterMixin ):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
    def setUp( self ):
        """simple docstring"""
        self.model_tester = FlaxBlenderbotModelTester(self)
    def test_use_cache_forward( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class , config , inputs_dict)
    def test_use_cache_forward_with_attn_mask( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class , config , inputs_dict)
    def test_encode( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class)
                model = model_class(config)
                @jax.jit
                def encode_jitted(input_ids , attention_mask=None , **kwargs):
                    return model.encode(input_ids=input_ids , attention_mask=attention_mask)
                with self.subTest("""JIT Enabled"""):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("""JIT Disabled"""):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(jitted_outputs) , len(outputs))
                for jitted_output, output in zip(jitted_outputs , outputs):
                    self.assertEqual(jitted_output.shape , output.shape)
    def test_decode( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""])
                prepared_inputs_dict = {
                    """decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
                    """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
                    """encoder_outputs""": encoder_outputs,
                }
                @jax.jit
                def decode_jitted(decoder_input_ids , decoder_attention_mask , encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids , decoder_attention_mask=decoder_attention_mask , encoder_outputs=encoder_outputs , )
                with self.subTest("""JIT Enabled"""):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("""JIT Disabled"""):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(jitted_outputs) , len(outputs))
                for jitted_output, output in zip(jitted_outputs , outputs):
                    self.assertEqual(jitted_output.shape , output.shape)
@slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("""facebook/blenderbot-400M-distill""")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
@unittest.skipUnless(jax_device != """cpu""" , """3B test too slow on CPU.""")
@slow
    def test_generation_from_short_input_same_as_parlai_3B( self ):
        """simple docstring"""
        FASTER_GEN_KWARGS = {"""num_beams""": 1, """early_stopping""": True, """min_length""": 1_5, """max_length""": 2_5}
        TOK_DECODE_KW = {"""skip_special_tokens""": True, """clean_up_tokenization_spaces""": True}
        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("""facebook/blenderbot-3B""" , from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("""facebook/blenderbot-3B""")
        src_text = ["""Sam"""]
        model_inputs = tokenizer(src_text , return_tensors="""jax""")
        generated_ids = model.generate(**model_inputs , **FASTER_GEN_KWARGS)
        tgt_text = """Sam is a great name. It means \"sun\" in Gaelic."""
        generated_txt = tokenizer.batch_decode(generated_ids.sequences , **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    '''simple docstring'''
    url = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"""
    image = Image.open(requests.get(url , stream=True ).raw ).convert("""RGB""" )
    return image
def create_rename_keys(config ):
    '''simple docstring'''
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
def rename_key(dct , old , new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def read_in_q_v_bias(state_dict , config ):
    '''simple docstring'''
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        q_bias = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' )
        v_bias = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
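        # k carries no bias in the original checkpoint, so a zero block shaped
        # like q_bias is inserted between q and v to form the packed qkv bias
        # expected by the HF attention layer.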
        qkv_bias = torch.cat((q_bias, torch.zeros_like(q_bias , requires_grad=False ), v_bias) )
        state_dict[F'''visual_encoder.blocks.{i}.attn.qkv.bias'''] = qkv_bias
def get_blipa_config(model_name , eos_token_id ):
    '''simple docstring'''
    image_size = 3_64 if """coco""" in model_name else 2_24
    vision_config = BlipaVisionConfig(image_size=image_size ).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=eos_token_id ).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=eos_token_id ).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
    config = BlipaConfig(vision_config=vision_config , text_config=text_config )
    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
    '''simple docstring'''
    tokenizer = (
        AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" )
        if """opt""" in model_name
        else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" )
    )
    eos_token_id = tokenizer("""\n""" , add_special_tokens=False ).input_ids[0]
    config , image_size = get_blipa_config(model_name , eos_token_id=eos_token_id )
    hf_model = BlipaForConditionalGeneration(config ).eval()
    model_name_to_original = {
        """blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""),
        """blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""),
        """blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""),
        """blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""),
        """blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""),
        """blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""),
        """blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""),
    }
    name , model_type = model_name_to_original[model_name]
    # load original model
    print("""Loading original model...""" )
    device = """cuda""" if torch.cuda.is_available() else """cpu"""
    original_model , vis_processors , _ = load_model_and_preprocess(
        name=name , model_type=model_type , is_eval=True , device=device )
    original_model.eval()
    print("""Done!""" )
    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key )
        if key.startswith("""Qformer.bert""" ):
            key = key.replace("""Qformer.bert""" , """qformer""" )
        if "attention.self" in key:
            key = key.replace("""self""" , """attention""" )
        if "opt_proj" in key:
            key = key.replace("""opt_proj""" , """language_projection""" )
        if "t5_proj" in key:
            key = key.replace("""t5_proj""" , """language_projection""" )
        if key.startswith("""opt""" ):
            key = key.replace("""opt""" , """language""" )
        if key.startswith("""t5""" ):
            key = key.replace("""t5""" , """language""" )
        state_dict[key] = val
    # read in qv biases
    read_in_q_v_bias(state_dict , config )
    missing_keys , unexpected_keys = hf_model.load_state_dict(state_dict , strict=False )
    assert len(missing_keys ) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]
    image = load_demo_image()
    original_pixel_values = vis_processors["""eval"""](image ).unsqueeze(0 ).to(device )
    input_ids = tokenizer(["""\n"""] , return_tensors="""pt""" ).input_ids.to(device )
    # create processor
    image_processor = BlipImageProcessor(
        size={"""height""": image_size, """width""": image_size} , image_mean=OPENAI_CLIP_MEAN , image_std=OPENAI_CLIP_STD )
    processor = BlipaProcessor(image_processor=image_processor , tokenizer=tokenizer )
    pixel_values = processor(images=image , return_tensors="""pt""" ).pixel_values.to(device )
    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values , original_pixel_values )
    original_model.to(device )
    hf_model.to(device )
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits
            logits = hf_model(pixel_values , input_ids ).logits
        else:
            original_logits = original_model(
                {"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
            logits = hf_model(pixel_values , input_ids , labels=labels ).logits
    assert original_logits.shape == logits.shape
    print("""First values of original logits:""" , original_logits[0, :3, :3] )
    print("""First values of HF logits:""" , logits[0, :3, :3] )
    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5_850, -4.4_440, -8.9_922], [-47.4_322, -5.9_143, -1.7_340]] , device=device )
        assert torch.allclose(logits[0, :3, :3] , expected_slice_logits , atol=1E-4 )
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0_109, -9.8_967, -12.6_280], [-68.6_578, -12.7_191, -10.5_065]] , device=device )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype ) , logits , atol=1E-2 )
    print("""Looks ok!""" )
    print("""Generating a caption...""" )
    prompt = """"""
    input_ids = tokenizer(prompt , return_tensors="""pt""" ).input_ids.to(device )
    original_outputs = original_model.generate({"""image""": original_pixel_values} )
    outputs = hf_model.generate(
        pixel_values , input_ids , do_sample=False , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
    print("""Original generation:""" , original_outputs )
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=True )
    output_text = [text.strip() for text in output_text]
    print("""HF generation:""" , output_text )
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path )
        hf_model.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        processor.push_to_hub(F'''nielsr/{model_name}''' )
        hf_model.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
"blip2-opt-2.7b",
"blip2-opt-6.7b",
"blip2-opt-2.7b-coco",
"blip2-opt-6.7b-coco",
"blip2-flan-t5-xl",
"blip2-flan-t5-xl-coco",
"blip2-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="blip2-opt-2.7b",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
    args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens ) -> Optional[MinHash]:
    '''simple docstring'''
    if len(tokens ) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM )
    for token in set(tokens ):
        min_hash.update(token.encode() )
    return min_hash
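# Illustrative note (a sketch, not used by the pipeline): two signatures built
# this way can be compared with MinHash.jaccard(), which estimates the Jaccard
# similarity of the underlying token sets from NUM_PERM hashed permutations.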
def get_tokens(code ) -> Set[str]:
    '''simple docstring'''
    return {t for t in NON_ALPHA.split(code ) if len(t.strip() ) > 0}
class DuplicationIndex:
    def __init__( self , * , duplication_jaccard_threshold : float = 0.85 , ):
        """simple docstring"""
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set )
    def add( self , code_key : Tuple , min_hash : MinHash ) -> None:
        """simple docstring"""
        close_duplicates = self._index.query(min_hash )
        if code_key in self._index.keys:
            print(F'''Duplicate key {code_key}''' )
            return
        self._index.insert(code_key , min_hash )
        if len(close_duplicates ) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key )
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key )
    def get_duplicate_clusters( self ) -> List[List[Dict]]:
        """simple docstring"""
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates )
            # reformat the cluster to be a list of dict
            cluster = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster]
            duplicate_clusters.append(cluster )
        return duplicate_clusters
    def save( self , filepath ) -> None:
        """simple docstring"""
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath , """w""") as f:
            json.dump(duplicate_clusters , f)
def _compute_min_hash(element ):
    '''simple docstring'''
    index , data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] )
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator ):
    '''simple docstring'''
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash , ThreadedIterator(dataset_iterator , max_queue_size=1_00_00 ) , chunksize=1_00 , ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator , jaccard_threshold ):
    '''simple docstring'''
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold )
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator ) ) , max_queue_size=1_00 ) ):
        di.add(filename , min_hash )
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code_a , code_b ) -> float:
    '''simple docstring'''
    tokens_a = get_tokens(code_a )
    tokens_b = get_tokens(code_b )
    return len(tokens_a & tokens_b ) / len(tokens_a | tokens_b )
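# e.g. "a b c" vs. "b c d": the token intersection {b, c} has size 2 and the
# union {a, b, c, d} has size 4, giving a similarity of 2 / 4 = 0.5.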
_shared_dataset = None
def _find_cluster_extremes_shared(cluster , jaccard_threshold ):
    '''simple docstring'''
    extremes = []
    for element_a in cluster:
        code_a = _shared_dataset[element_a["""base_index"""]]["""content"""]
        for element_b in extremes:
            code_b = _shared_dataset[element_b["""base_index"""]]["""content"""]
            if jaccard_similarity(code_a , code_b ) >= jaccard_threshold:
                element_b["""copies"""] += 1
                break
        else:
            element_a["""copies"""] = 1
            extremes.append(element_a )
    return extremes
def find_extremes(cluster_list , dataset , jaccard_threshold ):
    '''simple docstring'''
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared , jaccard_threshold=jaccard_threshold )
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f , cluster_list , ) , total=len(cluster_list ) , ):
            extremes_list.append(extremes )
    return extremes_list
def deduplicate_dataset(dataset , jaccard_threshold = 0.85 ) -> Tuple[Type[Dataset], List[List[Dict]]]:
    '''simple docstring'''
    duplicate_clusters = make_duplicate_clusters(dataset , jaccard_threshold )
    duplicate_indices = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters , dataset , jaccard_threshold )
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["""base_index"""]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys() )
    ds_filter = dataset.filter(lambda x , idx : idx not in remove_indices , with_indices=True )
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["""is_extreme"""] = element["""base_index"""] in extreme_dict
            if element["is_extreme"]:
                element["""copies"""] = extreme_dict[element["""base_index"""]]["""copies"""]
    print(F'''Original dataset size: {len(dataset )}''' )
    print(F'''Number of duplicate clusters: {len(duplicate_clusters )}''' )
    print(F'''Files in duplicate cluster: {len(duplicate_indices )}''' )
    print(F'''Unique files in duplicate cluster: {len(extreme_dict )}''' )
    print(F'''Filtered dataset size: {len(ds_filter )}''' )
    return ds_filter, duplicate_clusters
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    def __init__( self , parent , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = 1_3
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 9_9
        self.hidden_size = 3_2
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 3_7
        self.hidden_act = """gelu"""
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 5_1_2
        self.type_vocab_size = 1_6
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            choice_labels = ids_tensor([self.batch_size] , self.num_choices)
        config = DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_distilbert_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels):
        """simple docstring"""
        model = TFDistilBertModel(config=config)
        inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_distilbert_for_masked_lm( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels):
        """simple docstring"""
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_distilbert_for_question_answering( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels):
        """simple docstring"""
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
    def create_and_check_distilbert_for_sequence_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
    def create_and_check_distilbert_for_multiple_choice( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1) , (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1) , (1, self.num_choices, 1))
        inputs = {
            """input_ids""": multiple_choice_inputs_ids,
            """attention_mask""": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
    def create_and_check_distilbert_for_token_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
    pipeline_model_mapping = (
{
"feature-extraction": TFDistilBertModel,
"fill-mask": TFDistilBertForMaskedLM,
"question-answering": TFDistilBertForQuestionAnswering,
"text-classification": TFDistilBertForSequenceClassification,
"token-classification": TFDistilBertForTokenClassification,
"zero-shot": TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self , config_class=DistilBertConfig , dim=3_7)
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_distilbert_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)
    def test_distilbert_for_masked_lm( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)
    def test_distilbert_for_question_answering( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)
    def test_distilbert_for_sequence_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)
    def test_distilbert_for_multiple_choice( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)
    def test_distilbert_for_token_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase ):
    @slow
    def test_output_embeds_base_model( self ):
        """simple docstring"""
        model = TFDistilBertModel.from_pretrained("""distilbert-base-uncased""")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 7_6_8]
        self.assertEqual(output.shape , expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [0.19_261_885, -0.13_732_955, 0.4_119_799],
                    [0.22_150_156, -0.07_422_661, 0.39_037_204],
                    [0.22_756_018, -0.0_896_414, 0.3_701_467],
                ]
            ])
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-4)
| 313
|
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
UpperCAmelCase : Union[str, Any] = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
for pegasus_name, hf_name in PATTERNS:
lowercase_ = k.replace(__lowerCAmelCase , __lowerCAmelCase )
return k
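# --- worked example (illustrative, not from the original file) ---
# Applying the PATTERNS replacements in order to a typical TF variable name:
#   "encoder/memory_attention/output_proj/kernel"
#   -> "encoder.encoder_attn.out_proj.weight"
# ("memory_attention" -> "encoder_attn", "/" -> ".",
#  "output_proj" -> "out_proj", "kernel" -> "weight")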
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> PegasusForConditionalGeneration:
'''simple docstring'''
lowercase_ = DEFAULTS.copy()
cfg_kwargs.update(__lowerCAmelCase )
lowercase_ = PegasusConfig(**__lowerCAmelCase )
lowercase_ = PegasusForConditionalGeneration(__lowerCAmelCase )
lowercase_ = torch_model.model.state_dict()
lowercase_ = {}
for k, v in tf_weights.items():
lowercase_ = rename_state_dict_key(__lowerCAmelCase )
if new_k not in sd:
raise ValueError(F'''could not find new key {new_k} in state dict. (converted from {k})''' )
if "dense" in k or "proj" in new_k:
lowercase_ = v.T
lowercase_ = torch.tensor(__lowerCAmelCase , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, F'''{new_k}, {k}, {v.shape}, {sd[new_k].shape}'''
# make sure embedding.padding_idx is respected
lowercase_ = torch.zeros_like(mapping["""shared.weight"""][cfg.pad_token_id + 1] )
lowercase_ = mapping["""shared.weight"""]
lowercase_ = mapping["""shared.weight"""]
lowercase_ = {k: torch.zeros_like(__lowerCAmelCase ) for k, v in sd.items() if k.endswith("""bias""" ) and k not in mapping}
mapping.update(**__lowerCAmelCase )
lowercase_ , lowercase_ = torch_model.model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase )
lowercase_ = [
k for k in missing if k not in ["""encoder.embed_positions.weight""", """decoder.embed_positions.weight"""]
]
assert unexpected_missing == [], F'''no matches found for the following torch keys {unexpected_missing}'''
assert extra == [], F'''no matches found for the following tf keys {extra}'''
return torch_model
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
'''simple docstring'''
lowercase_ = tf.train.list_variables(__lowerCAmelCase )
lowercase_ = {}
lowercase_ = ["""Adafactor""", """global_step"""]
for name, shape in tqdm(__lowerCAmelCase , desc="""converting tf checkpoint to dict""" ):
lowercase_ = any(pat in name for pat in ignore_name )
if skip_key:
continue
lowercase_ = tf.train.load_variable(__lowerCAmelCase , __lowerCAmelCase )
lowercase_ = array
return tf_weights
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> str:
'''simple docstring'''
lowercase_ = Path(__lowerCAmelCase ).parent.name
lowercase_ = task_specific_params[F'''summarization_{dataset}''']["""max_position_embeddings"""]
lowercase_ = PegasusTokenizer.from_pretrained("""sshleifer/pegasus""" , model_max_length=__lowerCAmelCase )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(__lowerCAmelCase )
# convert model
lowercase_ = get_tf_weights_as_numpy(__lowerCAmelCase )
lowercase_ = task_specific_params[F'''summarization_{dataset}''']
if dataset == "large":
lowercase_ = task_specific_params
lowercase_ = convert_pegasus(__lowerCAmelCase , __lowerCAmelCase )
torch_model.save_pretrained(__lowerCAmelCase )
lowercase_ = torch_model.state_dict()
sd.pop("""model.decoder.embed_positions.weight""" )
sd.pop("""model.encoder.embed_positions.weight""" )
torch.save(__lowerCAmelCase , Path(__lowerCAmelCase ) / """pytorch_model.bin""" )
if __name__ == "__main__":
UpperCAmelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
UpperCAmelCase : List[Any] = parser.parse_args()
if args.save_dir is None:
UpperCAmelCase : List[str] = Path(args.tf_ckpt_path).parent.name
UpperCAmelCase : int = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 313
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase : Tuple = logging.get_logger(__name__)
UpperCAmelCase : Dict = {
"facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = "data2vec-text"
def __init__( self : List[Any] , lowerCAmelCase_ : str=3_0_5_2_2 , lowerCAmelCase_ : Any=7_6_8 , lowerCAmelCase_ : int=1_2 , lowerCAmelCase_ : List[str]=1_2 , lowerCAmelCase_ : Union[str, Any]=3_0_7_2 , lowerCAmelCase_ : Any="gelu" , lowerCAmelCase_ : int=0.1 , lowerCAmelCase_ : str=0.1 , lowerCAmelCase_ : Optional[Any]=5_1_2 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : Optional[int]=0.02 , lowerCAmelCase_ : Union[str, Any]=1E-12 , lowerCAmelCase_ : Union[str, Any]=1 , lowerCAmelCase_ : Union[str, Any]=0 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : Optional[int]="absolute" , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : List[str]=None , **lowerCAmelCase_ : Tuple , ):
"""simple docstring"""
super().__init__(pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_)
lowercase_ = vocab_size
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = hidden_act
lowercase_ = intermediate_size
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = max_position_embeddings
lowercase_ = type_vocab_size
lowercase_ = initializer_range
lowercase_ = layer_norm_eps
lowercase_ = position_embedding_type
lowercase_ = use_cache
lowercase_ = classifier_dropout
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
@property
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
if self.task == "multiple-choice":
lowercase_ = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowercase_ = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
])
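# --- illustrative note (not from the original file) ---
# These dynamic axes tell the ONNX exporter which input dimensions may vary
# at runtime: batch and sequence length always, plus a choice axis when the
# config is exported for a multiple-choice head.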
| 313
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def _SCREAMING_SNAKE_CASE () -> Generator[int, None, None]:
'''simple docstring'''
lowercase_ = {}
lowercase_ = 2
while True:
lowercase_ = factor_map.pop(__lowerCAmelCase , __lowerCAmelCase )
if factor:
lowercase_ = factor + prime
while x in factor_map:
x += factor
lowercase_ = factor
else:
lowercase_ = prime
yield prime
prime += 1
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase = 1E10 ) -> int:
'''simple docstring'''
lowercase_ = sieve()
lowercase_ = 1
while True:
lowercase_ = next(__lowerCAmelCase )
if (2 * prime * n) > limit:
return n
        # Ignore the next prime, as the remainder will be 2.
next(__lowerCAmelCase )
n += 2
if __name__ == "__main__":
print(solution())
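# --- illustrative sketch (not from the original file) ---
# The generator above is an incremental sieve: `factor_map` maps the next
# known composite to one of its prime factors, so each candidate is either
# popped as composite (with the factor slid forward) or yielded as prime.
# A minimal standalone version with the same bookkeeping, for inspection:
def _incremental_sieve():
    factor_map: dict[int, int] = {}
    candidate = 2
    while True:
        factor = factor_map.pop(candidate, None)
        if factor:  # composite: slide its factor to the next free multiple
            slot = candidate + factor
            while slot in factor_map:
                slot += factor
            factor_map[slot] = factor
        else:  # prime: its first unseen multiple is its square
            factor_map[candidate * candidate] = candidate
            yield candidate
        candidate += 1

_gen = _incremental_sieve()
print([next(_gen) for _ in range(5)])  # -> [2, 3, 5, 7, 11]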
| 313
| 1
|
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class SCREAMING_SNAKE_CASE__ :
@staticmethod
def _UpperCAmelCase ( *lowerCAmelCase_ : Any , **lowerCAmelCase_ : Any):
"""simple docstring"""
pass
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
UpperCAmelCase : int = (
"https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
@is_pipeline_test
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
lowercase__ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def _UpperCAmelCase ( self : Dict , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int]):
"""simple docstring"""
lowercase_ = pipeline(
"""document-question-answering""" , model=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_)
lowercase_ = INVOICE_URL
lowercase_ = list(zip(*apply_tesseract(load_image(lowerCAmelCase_) , lowerCAmelCase_ , """""")))
lowercase_ = """What is the placebo?"""
lowercase_ = [
{
"""image""": load_image(lowerCAmelCase_),
"""question""": question,
},
{
"""image""": image,
"""question""": question,
},
{
"""image""": image,
"""question""": question,
"""word_boxes""": word_boxes,
},
]
return dqa_pipeline, examples
def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : Any , lowerCAmelCase_ : int):
"""simple docstring"""
lowercase_ = dqa_pipeline(lowerCAmelCase_ , top_k=2)
self.assertEqual(
lowerCAmelCase_ , [
[
{"""score""": ANY(lowerCAmelCase_), """answer""": ANY(lowerCAmelCase_), """start""": ANY(lowerCAmelCase_), """end""": ANY(lowerCAmelCase_)},
{"""score""": ANY(lowerCAmelCase_), """answer""": ANY(lowerCAmelCase_), """start""": ANY(lowerCAmelCase_), """end""": ANY(lowerCAmelCase_)},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = pipeline("""document-question-answering""" , model="""hf-internal-testing/tiny-random-layoutlmv2""")
lowercase_ = INVOICE_URL
lowercase_ = """How many cats are there?"""
lowercase_ = [
{"""score""": 0.0_001, """answer""": """oy 2312/2019""", """start""": 3_8, """end""": 3_9},
{"""score""": 0.0_001, """answer""": """oy 2312/2019 DUE""", """start""": 3_8, """end""": 4_0},
]
lowercase_ = dqa_pipeline(image=lowerCAmelCase_ , question=lowerCAmelCase_ , top_k=2)
self.assertEqual(nested_simplify(lowerCAmelCase_ , decimals=4) , lowerCAmelCase_)
lowercase_ = dqa_pipeline({"""image""": image, """question""": question} , top_k=2)
self.assertEqual(nested_simplify(lowerCAmelCase_ , decimals=4) , lowerCAmelCase_)
        # No text is detected in this image, so layoutlmv2 should fail and the
        # pipeline should return an empty answer.
lowercase_ = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
lowercase_ = dqa_pipeline(image=lowerCAmelCase_ , question=lowerCAmelCase_ , top_k=2)
self.assertEqual(lowerCAmelCase_ , [])
        # We can optionally pass the words and bounding boxes directly
lowercase_ = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
lowercase_ = []
lowercase_ = []
lowercase_ = dqa_pipeline(image=lowerCAmelCase_ , question=lowerCAmelCase_ , words=lowerCAmelCase_ , boxes=lowerCAmelCase_ , top_k=2)
self.assertEqual(lowerCAmelCase_ , [])
@slow
@require_torch
@require_detectrona
@require_pytesseract
def _UpperCAmelCase ( self : str):
"""simple docstring"""
lowercase_ = pipeline(
"""document-question-answering""" , model="""tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa""" , revision="""9977165""" , )
lowercase_ = INVOICE_URL
lowercase_ = """What is the invoice number?"""
lowercase_ = dqa_pipeline(image=lowerCAmelCase_ , question=lowerCAmelCase_ , top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4) , [
{"""score""": 0.9_944, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
{"""score""": 0.0_009, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
] , )
lowercase_ = dqa_pipeline({"""image""": image, """question""": question} , top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4) , [
{"""score""": 0.9_944, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
{"""score""": 0.0_009, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
] , )
lowercase_ = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4) , [
[
{"""score""": 0.9_944, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
{"""score""": 0.0_009, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def _UpperCAmelCase ( self : str):
"""simple docstring"""
lowercase_ = pipeline(
"""document-question-answering""" , model="""tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa""" , revision="""9977165""" , max_seq_len=5_0 , )
lowercase_ = INVOICE_URL
lowercase_ = """What is the invoice number?"""
lowercase_ = dqa_pipeline(image=lowerCAmelCase_ , question=lowerCAmelCase_ , top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4) , [
{"""score""": 0.9_974, """answer""": """1110212019""", """start""": 2_3, """end""": 2_3},
{"""score""": 0.9_948, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
] , )
lowercase_ = dqa_pipeline({"""image""": image, """question""": question} , top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4) , [
{"""score""": 0.9_974, """answer""": """1110212019""", """start""": 2_3, """end""": 2_3},
{"""score""": 0.9_948, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
] , )
lowercase_ = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4) , [
[
{"""score""": 0.9_974, """answer""": """1110212019""", """start""": 2_3, """end""": 2_3},
{"""score""": 0.9_948, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ = AutoTokenizer.from_pretrained(
"""impira/layoutlm-document-qa""" , revision="""3dc6de3""" , add_prefix_space=lowerCAmelCase_)
lowercase_ = pipeline(
"""document-question-answering""" , model="""impira/layoutlm-document-qa""" , tokenizer=lowerCAmelCase_ , revision="""3dc6de3""" , )
lowercase_ = INVOICE_URL
lowercase_ = """What is the invoice number?"""
lowercase_ = dqa_pipeline(image=lowerCAmelCase_ , question=lowerCAmelCase_ , top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4) , [
{"""score""": 0.4_251, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
{"""score""": 0.0_819, """answer""": """1110212019""", """start""": 2_3, """end""": 2_3},
] , )
lowercase_ = dqa_pipeline({"""image""": image, """question""": question} , top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4) , [
{"""score""": 0.4_251, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
{"""score""": 0.0_819, """answer""": """1110212019""", """start""": 2_3, """end""": 2_3},
] , )
lowercase_ = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4) , [
[
{"""score""": 0.4_251, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
{"""score""": 0.0_819, """answer""": """1110212019""", """start""": 2_3, """end""": 2_3},
]
]
* 2 , )
lowercase_ = list(zip(*apply_tesseract(load_image(lowerCAmelCase_) , lowerCAmelCase_ , """""")))
# This model should also work if `image` is set to None
lowercase_ = dqa_pipeline({"""image""": None, """word_boxes""": word_boxes, """question""": question} , top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4) , [
{"""score""": 0.4_251, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
{"""score""": 0.0_819, """answer""": """1110212019""", """start""": 2_3, """end""": 2_3},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = AutoTokenizer.from_pretrained(
"""impira/layoutlm-document-qa""" , revision="""3dc6de3""" , add_prefix_space=lowerCAmelCase_)
lowercase_ = pipeline(
"""document-question-answering""" , model="""impira/layoutlm-document-qa""" , tokenizer=lowerCAmelCase_ , revision="""3dc6de3""" , max_seq_len=5_0 , )
lowercase_ = INVOICE_URL
lowercase_ = """What is the invoice number?"""
lowercase_ = dqa_pipeline(image=lowerCAmelCase_ , question=lowerCAmelCase_ , top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4) , [
{"""score""": 0.9_999, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
{"""score""": 0.9_998, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
] , )
lowercase_ = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4) , [
[
{"""score""": 0.9_999, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
{"""score""": 0.9_998, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
]
]
* 2 , )
lowercase_ = list(zip(*apply_tesseract(load_image(lowerCAmelCase_) , lowerCAmelCase_ , """""")))
# This model should also work if `image` is set to None
lowercase_ = dqa_pipeline({"""image""": None, """word_boxes""": word_boxes, """question""": question} , top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4) , [
{"""score""": 0.9_999, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
{"""score""": 0.9_998, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
] , )
@slow
@require_torch
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
lowercase_ = pipeline(
"""document-question-answering""" , model="""naver-clova-ix/donut-base-finetuned-docvqa""" , tokenizer=AutoTokenizer.from_pretrained("""naver-clova-ix/donut-base-finetuned-docvqa""") , feature_extractor="""naver-clova-ix/donut-base-finetuned-docvqa""" , )
lowercase_ = INVOICE_URL
lowercase_ = """What is the invoice number?"""
lowercase_ = dqa_pipeline(image=lowerCAmelCase_ , question=lowerCAmelCase_ , top_k=2)
self.assertEqual(nested_simplify(lowerCAmelCase_ , decimals=4) , [{"""answer""": """us-001"""}])
@require_tf
@unittest.skip("""Document question answering not implemented in TF""")
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
pass
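# --- illustrative note (not from the original file) ---
# The pipeline accepts either an image (words/boxes OCR'd via tesseract) or
# image=None with precomputed `word_boxes`; Donut-style models generate the
# answer text instead of extracting a span, hence the answer-only output in
# the last torch test above.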
| 313
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Any , lowerCAmelCase_ : int = 6):
"""simple docstring"""
lowercase_ = None
lowercase_ = None
self.create_linked_list(lowerCAmelCase_)
def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : int):
"""simple docstring"""
lowercase_ = Node()
lowercase_ = current_node
lowercase_ = current_node
lowercase_ = current_node
for _ in range(1 , lowerCAmelCase_):
lowercase_ = Node()
lowercase_ = current_node
lowercase_ = previous_node
lowercase_ = current_node
lowercase_ = self.front
lowercase_ = previous_node
def _UpperCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
return (
self.front == self.rear
and self.front is not None
and self.front.data is None
)
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
self.check_can_perform_operation()
return self.front.data if self.front else None
def _UpperCAmelCase ( self : int , lowerCAmelCase_ : Any):
"""simple docstring"""
if self.rear is None:
return
self.check_is_full()
if not self.is_empty():
lowercase_ = self.rear.next
if self.rear:
lowercase_ = data
def _UpperCAmelCase ( self : str):
"""simple docstring"""
self.check_can_perform_operation()
if self.rear is None or self.front is None:
return None
if self.front == self.rear:
lowercase_ = self.front.data
lowercase_ = None
return data
lowercase_ = self.front
lowercase_ = old_front.next
lowercase_ = old_front.data
lowercase_ = None
return data
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
if self.is_empty():
raise Exception("""Empty Queue""")
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
if self.rear and self.rear.next == self.front:
raise Exception("""Full Queue""")
class SCREAMING_SNAKE_CASE__ :
def __init__( self : List[str]):
"""simple docstring"""
lowercase_ = None
lowercase_ = None
lowercase_ = None
if __name__ == "__main__":
import doctest
doctest.testmod()
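# --- illustrative sketch (not from the original file) ---
# The class above keeps a fixed ring of linked nodes with front/rear pointers:
# enqueue advances rear, dequeue advances front, and the queue is full when
# rear.next is front.  An array-backed equivalent with the same fixed-capacity
# semantics (`_RingQueue` is a hypothetical name):
class _RingQueue:
    def __init__(self, capacity: int = 6) -> None:
        self._buf: list = [None] * capacity
        self._head = 0  # index of the front element
        self._size = 0

    def enqueue(self, item) -> None:
        if self._size == len(self._buf):
            raise Exception("Full Queue")
        self._buf[(self._head + self._size) % len(self._buf)] = item
        self._size += 1

    def dequeue(self):
        if self._size == 0:
            raise Exception("Empty Queue")
        item = self._buf[self._head]
        self._buf[self._head] = None
        self._head = (self._head + 1) % len(self._buf)
        self._size -= 1
        return item

_rq = _RingQueue(3)
_rq.enqueue("a")
_rq.enqueue("b")
assert _rq.dequeue() == "a"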
| 313
| 1
|
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
UpperCAmelCase : List[str] = logging.get_logger(__name__)
UpperCAmelCase : Union[str, Any] = OrderedDict(
[
("audio-spectrogram-transformer", "ASTFeatureExtractor"),
("beit", "BeitFeatureExtractor"),
("chinese_clip", "ChineseCLIPFeatureExtractor"),
("clap", "ClapFeatureExtractor"),
("clip", "CLIPFeatureExtractor"),
("clipseg", "ViTFeatureExtractor"),
("conditional_detr", "ConditionalDetrFeatureExtractor"),
("convnext", "ConvNextFeatureExtractor"),
("cvt", "ConvNextFeatureExtractor"),
("data2vec-audio", "Wav2Vec2FeatureExtractor"),
("data2vec-vision", "BeitFeatureExtractor"),
("deformable_detr", "DeformableDetrFeatureExtractor"),
("deit", "DeiTFeatureExtractor"),
("detr", "DetrFeatureExtractor"),
("dinat", "ViTFeatureExtractor"),
("donut-swin", "DonutFeatureExtractor"),
("dpt", "DPTFeatureExtractor"),
("encodec", "EncodecFeatureExtractor"),
("flava", "FlavaFeatureExtractor"),
("glpn", "GLPNFeatureExtractor"),
("groupvit", "CLIPFeatureExtractor"),
("hubert", "Wav2Vec2FeatureExtractor"),
("imagegpt", "ImageGPTFeatureExtractor"),
("layoutlmv2", "LayoutLMv2FeatureExtractor"),
("layoutlmv3", "LayoutLMv3FeatureExtractor"),
("levit", "LevitFeatureExtractor"),
("maskformer", "MaskFormerFeatureExtractor"),
("mctct", "MCTCTFeatureExtractor"),
("mobilenet_v1", "MobileNetV1FeatureExtractor"),
("mobilenet_v2", "MobileNetV2FeatureExtractor"),
("mobilevit", "MobileViTFeatureExtractor"),
("nat", "ViTFeatureExtractor"),
("owlvit", "OwlViTFeatureExtractor"),
("perceiver", "PerceiverFeatureExtractor"),
("poolformer", "PoolFormerFeatureExtractor"),
("regnet", "ConvNextFeatureExtractor"),
("resnet", "ConvNextFeatureExtractor"),
("segformer", "SegformerFeatureExtractor"),
("sew", "Wav2Vec2FeatureExtractor"),
("sew-d", "Wav2Vec2FeatureExtractor"),
("speech_to_text", "Speech2TextFeatureExtractor"),
("speecht5", "SpeechT5FeatureExtractor"),
("swiftformer", "ViTFeatureExtractor"),
("swin", "ViTFeatureExtractor"),
("swinv2", "ViTFeatureExtractor"),
("table-transformer", "DetrFeatureExtractor"),
("timesformer", "VideoMAEFeatureExtractor"),
("tvlt", "TvltFeatureExtractor"),
("unispeech", "Wav2Vec2FeatureExtractor"),
("unispeech-sat", "Wav2Vec2FeatureExtractor"),
("van", "ConvNextFeatureExtractor"),
("videomae", "VideoMAEFeatureExtractor"),
("vilt", "ViltFeatureExtractor"),
("vit", "ViTFeatureExtractor"),
("vit_mae", "ViTFeatureExtractor"),
("vit_msn", "ViTFeatureExtractor"),
("wav2vec2", "Wav2Vec2FeatureExtractor"),
("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
("wavlm", "Wav2Vec2FeatureExtractor"),
("whisper", "WhisperFeatureExtractor"),
("xclip", "CLIPFeatureExtractor"),
("yolos", "YolosFeatureExtractor"),
]
)
UpperCAmelCase : Optional[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Tuple:
'''simple docstring'''
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
lowercase_ = model_type_to_module_name(__lowerCAmelCase )
lowercase_ = importlib.import_module(F'''.{module_name}''' , """transformers.models""" )
try:
return getattr(__lowerCAmelCase , __lowerCAmelCase )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(__lowerCAmelCase , """__name__""" , __lowerCAmelCase ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
lowercase_ = importlib.import_module("""transformers""" )
if hasattr(__lowerCAmelCase , __lowerCAmelCase ):
return getattr(__lowerCAmelCase , __lowerCAmelCase )
return None
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = False , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = False , **__lowerCAmelCase , ) -> int:
'''simple docstring'''
lowercase_ = get_file_from_repo(
__lowerCAmelCase , __lowerCAmelCase , cache_dir=__lowerCAmelCase , force_download=__lowerCAmelCase , resume_download=__lowerCAmelCase , proxies=__lowerCAmelCase , use_auth_token=__lowerCAmelCase , revision=__lowerCAmelCase , local_files_only=__lowerCAmelCase , )
if resolved_config_file is None:
logger.info(
"""Could not locate the feature extractor configuration file, will try to use the model config instead.""" )
return {}
with open(__lowerCAmelCase , encoding="""utf-8""" ) as reader:
return json.load(__lowerCAmelCase )
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Any):
"""simple docstring"""
raise EnvironmentError(
"""AutoFeatureExtractor is designed to be instantiated """
"""using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.""")
@classmethod
@replace_list_option_in_docstrings(lowerCAmelCase_)
def _UpperCAmelCase ( cls : List[Any] , lowerCAmelCase_ : Tuple , **lowerCAmelCase_ : Union[str, Any]):
"""simple docstring"""
lowercase_ = kwargs.pop("""config""" , lowerCAmelCase_)
lowercase_ = kwargs.pop("""trust_remote_code""" , lowerCAmelCase_)
lowercase_ = True
lowercase_ , lowercase_ = FeatureExtractionMixin.get_feature_extractor_dict(lowerCAmelCase_ , **lowerCAmelCase_)
lowercase_ = config_dict.get("""feature_extractor_type""" , lowerCAmelCase_)
lowercase_ = None
if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {}):
lowercase_ = config_dict["""auto_map"""]["""AutoFeatureExtractor"""]
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_):
lowercase_ = AutoConfig.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_)
            # It could be in `config.feature_extractor_type`
lowercase_ = getattr(lowerCAmelCase_ , """feature_extractor_type""" , lowerCAmelCase_)
if hasattr(lowerCAmelCase_ , """auto_map""") and "AutoFeatureExtractor" in config.auto_map:
lowercase_ = config.auto_map["""AutoFeatureExtractor"""]
if feature_extractor_class is not None:
lowercase_ = feature_extractor_class_from_name(lowerCAmelCase_)
lowercase_ = feature_extractor_auto_map is not None
lowercase_ = feature_extractor_class is not None or type(lowerCAmelCase_) in FEATURE_EXTRACTOR_MAPPING
lowercase_ = resolve_trust_remote_code(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
if has_remote_code and trust_remote_code:
lowercase_ = get_class_from_dynamic_module(
lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_)
lowercase_ = kwargs.pop("""code_revision""" , lowerCAmelCase_)
if os.path.isdir(lowerCAmelCase_):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(lowerCAmelCase_ , **lowerCAmelCase_)
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(lowerCAmelCase_ , **lowerCAmelCase_)
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(lowerCAmelCase_) in FEATURE_EXTRACTOR_MAPPING:
lowercase_ = FEATURE_EXTRACTOR_MAPPING[type(lowerCAmelCase_)]
return feature_extractor_class.from_dict(lowerCAmelCase_ , **lowerCAmelCase_)
raise ValueError(
F'''Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a '''
F'''`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following '''
F'''`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}''')
@staticmethod
def _UpperCAmelCase ( lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Tuple):
"""simple docstring"""
FEATURE_EXTRACTOR_MAPPING.register(lowerCAmelCase_ , lowerCAmelCase_)
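# --- illustrative note (not from the original file) ---
# Typical use, assuming the public name of this module:
#   extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base")
# Resolution order in from_pretrained above: `feature_extractor_type` in the
# preprocessor config, then an AutoFeatureExtractor entry in `auto_map`
# (remote code), then the model config, and finally the config-type lookup
# in FEATURE_EXTRACTOR_MAPPING.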
| 313
|
"""simple docstring"""
from collections.abc import Sequence
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase = False ) -> float:
'''simple docstring'''
if not arr:
return 0
lowercase_ = 0 if allow_empty_subarrays else float("""-inf""" )
lowercase_ = 0.0
for num in arr:
lowercase_ = max(0 if allow_empty_subarrays else num , curr_sum + num )
lowercase_ = max(__lowerCAmelCase , __lowerCAmelCase )
return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCAmelCase : Union[str, Any] = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F"{max_subarray_sum(nums) = }")
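# --- worked trace (illustrative, not from the original file) ---
# For [-2, 1, -3, 4, -1, 2, 1, -5, 4], Kadane's running sum evolves as
# -2, 1, -2, 4, 3, 5, 6, 1, 5, so max_subarray_sum(nums) == 6
# (the subarray [4, -1, 2, 1]).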
| 313
| 1
|
"""simple docstring"""
# Logistic Regression from scratch
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Dict:
'''simple docstring'''
return 1 / (1 + np.exp(-z ))
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> int:
'''simple docstring'''
return (-y * np.log(__lowerCAmelCase ) - (1 - y) * np.log(1 - h )).mean()
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> str:
'''simple docstring'''
lowercase_ = np.dot(__lowerCAmelCase , __lowerCAmelCase )
return np.sum(y * scores - np.log(1 + np.exp(__lowerCAmelCase ) ) )
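# The quantity above is the Bernoulli log-likelihood under the sigmoid link:
#   sum_i [ y_i * (x_i . theta) - log(1 + exp(x_i . theta)) ]
# maximizing it is equivalent to minimizing the cross-entropy cost above.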
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=7_00_00 ) -> Dict:
'''simple docstring'''
lowercase_ = np.zeros(x.shape[1] )
for iterations in range(__lowerCAmelCase ):
lowercase_ = np.dot(__lowerCAmelCase , __lowerCAmelCase )
lowercase_ = sigmoid_function(__lowerCAmelCase )
lowercase_ = np.dot(x.T , h - y ) / y.size
lowercase_ = theta - alpha * gradient # updating the weights
lowercase_ = np.dot(__lowerCAmelCase , __lowerCAmelCase )
lowercase_ = sigmoid_function(__lowerCAmelCase )
lowercase_ = cost_function(__lowerCAmelCase , __lowerCAmelCase )
if iterations % 1_00 == 0:
print(F'''loss: {j} \t''' ) # printing the loss after every 100 iterations
return theta
if __name__ == "__main__":
UpperCAmelCase : Tuple = datasets.load_iris()
UpperCAmelCase : str = iris.data[:, :2]
UpperCAmelCase : List[str] = (iris.target != 0) * 1
UpperCAmelCase : Any = 0.1
UpperCAmelCase : Optional[Any] = logistic_reg(alpha, x, y, max_iterations=7_0000)
print("theta: ", theta) # printing the theta i.e our weights vector
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> int:
'''simple docstring'''
return sigmoid_function(
np.dot(__lowerCAmelCase , __lowerCAmelCase ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
((UpperCAmelCase) , (UpperCAmelCase)) : List[str] = (x[:, 0].min(), x[:, 0].max())
((UpperCAmelCase) , (UpperCAmelCase)) : Optional[int] = (x[:, 1].min(), x[:, 1].max())
((UpperCAmelCase) , (UpperCAmelCase)) : Optional[int] = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
UpperCAmelCase : List[str] = np.c_[xxa.ravel(), xxa.ravel()]
UpperCAmelCase : List[Any] = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors="black")
plt.legend()
plt.show()
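# --- illustrative sketch (not from the original file) ---
# Turning fitted probabilities into hard predictions and a train accuracy,
# assuming the `x`, `y`, `theta` names from the script above:
_probs = 1 / (1 + np.exp(-np.dot(x, theta)))
_y_pred = (_probs >= 0.5).astype(int)
print("train accuracy:", (_y_pred == y).mean())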
| 313
|
"""simple docstring"""
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
UpperCAmelCase : Optional[int] = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
def _UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Optional[Any]=None):
"""simple docstring"""
lowercase_ = self.layer[current_layer](lowerCAmelCase_ , lowerCAmelCase_ , head_mask[current_layer])
lowercase_ = layer_outputs[0]
return hidden_states
@add_start_docstrings(
"The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , __UpperCAmelCase , )
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
def __init__( self : Any , lowerCAmelCase_ : Dict):
"""simple docstring"""
super().__init__(lowerCAmelCase_)
lowercase_ = BertEncoderWithPabee(lowerCAmelCase_)
self.init_weights()
lowercase_ = 0
lowercase_ = 0
lowercase_ = 0
lowercase_ = 0
def _UpperCAmelCase ( self : Dict , lowerCAmelCase_ : int):
"""simple docstring"""
lowercase_ = threshold
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : Optional[Any]):
"""simple docstring"""
lowercase_ = patience
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = 0
lowercase_ = 0
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ = self.inference_layers_num / self.inference_instances_num
lowercase_ = (
F'''*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ='''
F''' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***'''
)
print(lowerCAmelCase_)
@add_start_docstrings_to_model_forward(lowerCAmelCase_)
def _UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : str=None , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : int=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : List[Any]=False , ):
"""simple docstring"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""")
elif input_ids is not None:
lowercase_ = input_ids.size()
elif inputs_embeds is not None:
lowercase_ = inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""")
lowercase_ = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
lowercase_ = torch.ones(lowerCAmelCase_ , device=lowerCAmelCase_)
if token_type_ids is None:
lowercase_ = torch.zeros(lowerCAmelCase_ , dtype=torch.long , device=lowerCAmelCase_)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
lowercase_ = self.get_extended_attention_mask(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
        # If a 2D or 3D attention mask is provided for the cross-attention,
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
lowercase_ , lowercase_ , lowercase_ = encoder_hidden_states.size()
lowercase_ = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
lowercase_ = torch.ones(lowerCAmelCase_ , device=lowerCAmelCase_)
lowercase_ = self.invert_attention_mask(lowerCAmelCase_)
else:
lowercase_ = None
# Prepare head mask if needed
        # 1.0 in head_mask indicates we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
lowercase_ = self.get_head_mask(lowerCAmelCase_ , self.config.num_hidden_layers)
lowercase_ = self.embeddings(
input_ids=lowerCAmelCase_ , position_ids=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , inputs_embeds=lowerCAmelCase_)
lowercase_ = embedding_output
if self.training:
lowercase_ = []
for i in range(self.config.num_hidden_layers):
lowercase_ = self.encoder.adaptive_forward(
lowerCAmelCase_ , current_layer=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , head_mask=lowerCAmelCase_)
lowercase_ = self.pooler(lowerCAmelCase_)
lowercase_ = output_layers[i](output_dropout(lowerCAmelCase_))
res.append(lowerCAmelCase_)
elif self.patience == 0: # Use all layers for inference
lowercase_ = self.encoder(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , head_mask=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=lowerCAmelCase_ , )
lowercase_ = self.pooler(encoder_outputs[0])
lowercase_ = [output_layers[self.config.num_hidden_layers - 1](lowerCAmelCase_)]
else:
lowercase_ = 0
lowercase_ = None
lowercase_ = 0
for i in range(self.config.num_hidden_layers):
calculated_layer_num += 1
lowercase_ = self.encoder.adaptive_forward(
lowerCAmelCase_ , current_layer=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , head_mask=lowerCAmelCase_)
lowercase_ = self.pooler(lowerCAmelCase_)
lowercase_ = output_layers[i](lowerCAmelCase_)
if regression:
lowercase_ = logits.detach()
if patient_result is not None:
lowercase_ = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels) < self.regression_threshold:
patient_counter += 1
else:
lowercase_ = 0
else:
lowercase_ = logits.detach().argmax(dim=1)
if patient_result is not None:
lowercase_ = patient_result.detach().argmax(dim=1)
if (patient_result is not None) and torch.all(labels.eq(lowerCAmelCase_)):
patient_counter += 1
else:
lowercase_ = 0
lowercase_ = logits
if patient_counter == self.patience:
break
lowercase_ = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
"Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , __UpperCAmelCase , )
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
def __init__( self : Optional[Any] , lowerCAmelCase_ : str):
"""simple docstring"""
super().__init__(lowerCAmelCase_)
lowercase_ = config.num_labels
lowercase_ = BertModelWithPabee(lowerCAmelCase_)
lowercase_ = nn.Dropout(config.hidden_dropout_prob)
lowercase_ = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels) for _ in range(config.num_hidden_layers)])
self.init_weights()
@add_start_docstrings_to_model_forward(lowerCAmelCase_)
def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : List[str]=None , ):
"""simple docstring"""
lowercase_ = self.bert(
input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , position_ids=lowerCAmelCase_ , head_mask=lowerCAmelCase_ , inputs_embeds=lowerCAmelCase_ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
lowercase_ = (logits[-1],)
if labels is not None:
lowercase_ = None
lowercase_ = 0
for ix, logits_item in enumerate(lowerCAmelCase_):
if self.num_labels == 1:
# We are doing regression
lowercase_ = MSELoss()
lowercase_ = loss_fct(logits_item.view(-1) , labels.view(-1))
else:
lowercase_ = CrossEntropyLoss()
lowercase_ = loss_fct(logits_item.view(-1 , self.num_labels) , labels.view(-1))
if total_loss is None:
lowercase_ = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
lowercase_ = (total_loss / total_weights,) + outputs
return outputs
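# --- illustrative note (not from the original file) ---
# PABEE ("patience-based early exiting") attaches one classifier per encoder
# layer; at inference the model exits as soon as `patience` consecutive
# per-layer predictions agree (or, for regression, stay within
# `regression_threshold`), so easy inputs run fewer layers.  During training
# every layer's loss contributes, weighted by its depth (ix + 1) as above.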
| 313
| 1
|
"""simple docstring"""
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=10_24 , __lowerCAmelCase=10_24 , __lowerCAmelCase=False , **__lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ = AutoTokenizer.from_pretrained(__lowerCAmelCase )
lowercase_ = SeqaSeqDataset(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , type_path="""train""" , **__lowerCAmelCase )
lowercase_ = tok.pad_token_id
def get_lens(__lowerCAmelCase ):
lowercase_ = tqdm(
DataLoader(__lowerCAmelCase , batch_size=5_12 , num_workers=8 , shuffle=__lowerCAmelCase , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
lowercase_ = []
for batch in dl:
lowercase_ = batch["""input_ids"""].ne(__lowerCAmelCase ).sum(1 ).tolist()
lowercase_ = batch["""labels"""].ne(__lowerCAmelCase ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(__lowerCAmelCase , __lowerCAmelCase ):
max_lens.append(max(__lowerCAmelCase , __lowerCAmelCase ) )
else:
max_lens.extend(__lowerCAmelCase )
return max_lens
lowercase_ = get_lens(__lowerCAmelCase )
lowercase_ = SeqaSeqDataset(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , type_path="""val""" , **__lowerCAmelCase )
lowercase_ = get_lens(__lowerCAmelCase )
pickle_save(__lowerCAmelCase , train_ds.len_file )
pickle_save(__lowerCAmelCase , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
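# --- illustrative note (not from the original file; invocation is assumed) ---
# Typical invocation via the fire CLI surface:
#   python save_len_file.py facebook/bart-large-cnn path/to/data_dir
# This tokenizes the train/val splits once and pickles per-example lengths to
# each dataset's `len_file`, so later runs can bucket batches by length.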
| 313
|
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Any:
'''simple docstring'''
    if (
        (cp >= 0x4e00 and cp <= 0x9fff)  # CJK Unified Ideographs
        or (cp >= 0x3400 and cp <= 0x4dbf)  # CJK Unified Ideographs Extension A
        or (cp >= 0x20000 and cp <= 0x2a6df)  # CJK Unified Ideographs Extension B
        or (cp >= 0x2a700 and cp <= 0x2b73f)  # CJK Unified Ideographs Extension C
        or (cp >= 0x2b740 and cp <= 0x2b81f)  # CJK Unified Ideographs Extension D
        or (cp >= 0x2b820 and cp <= 0x2ceaf)  # CJK Unified Ideographs Extension E
        or (cp >= 0xf900 and cp <= 0xfaff)  # CJK Compatibility Ideographs
        or (cp >= 0x2f800 and cp <= 0x2fa1f)  # CJK Compatibility Ideographs Supplement
    ):
        return True
return False
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> List[str]:
'''simple docstring'''
for char in word:
lowercase_ = ord(__lowerCAmelCase )
if not _is_chinese_char(__lowerCAmelCase ):
return 0
return 1
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Any:
'''simple docstring'''
lowercase_ = set()
for token in tokens:
lowercase_ = len(__lowerCAmelCase ) > 1 and is_chinese(__lowerCAmelCase )
if chinese_word:
word_set.add(__lowerCAmelCase )
lowercase_ = list(__lowerCAmelCase )
return word_list
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> List[str]:
'''simple docstring'''
if not chinese_word_set:
return bert_tokens
lowercase_ = max([len(__lowerCAmelCase ) for w in chinese_word_set] )
lowercase_ = bert_tokens
lowercase_ , lowercase_ = 0, len(__lowerCAmelCase )
while start < end:
lowercase_ = True
if is_chinese(bert_word[start] ):
lowercase_ = min(end - start , __lowerCAmelCase )
for i in range(__lowerCAmelCase , 1 , -1 ):
lowercase_ = """""".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
lowercase_ = """##""" + bert_word[j]
lowercase_ = start + i
lowercase_ = False
break
if single_word:
start += 1
return bert_word
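# --- worked example (illustrative, not from the original file) ---
# With chinese_word_set = {"北京"} and bert_tokens = ["我", "爱", "北", "京"],
# the whole word "北京" matches at index 2, so the result is
# ["我", "爱", "北", "##京"]: non-initial characters of a matched whole word
# get the "##" prefix used for whole-word masking.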
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> str:
'''simple docstring'''
lowercase_ = []
for i in range(0 , len(__lowerCAmelCase ) , 1_00 ):
lowercase_ = ltp_tokenizer.seg(lines[i : i + 1_00] )[0]
lowercase_ = [get_chinese_word(__lowerCAmelCase ) for r in res]
ltp_res.extend(__lowerCAmelCase )
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )
lowercase_ = []
for i in range(0 , len(__lowerCAmelCase ) , 1_00 ):
lowercase_ = bert_tokenizer(lines[i : i + 1_00] , add_special_tokens=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=5_12 )
bert_res.extend(res["""input_ids"""] )
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )
lowercase_ = []
for input_ids, chinese_word in zip(__lowerCAmelCase , __lowerCAmelCase ):
lowercase_ = []
for id in input_ids:
lowercase_ = bert_tokenizer._convert_id_to_token(__lowerCAmelCase )
input_tokens.append(__lowerCAmelCase )
lowercase_ = add_sub_symbol(__lowerCAmelCase , __lowerCAmelCase )
lowercase_ = []
        # We only save positions of Chinese subwords that start with ##, which means they are part of a whole word.
for i, token in enumerate(__lowerCAmelCase ):
if token[:2] == "##":
lowercase_ = token[2:]
                # record the position of this Chinese subword token
if len(__lowerCAmelCase ) == 1 and _is_chinese_char(ord(__lowerCAmelCase ) ):
ref_id.append(__lowerCAmelCase )
ref_ids.append(__lowerCAmelCase )
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )
return ref_ids
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> int:
'''simple docstring'''
with open(args.file_name , """r""" , encoding="""utf-8""" ) as f:
lowercase_ = f.readlines()
lowercase_ = [line.strip() for line in data if len(__lowerCAmelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
lowercase_ = LTP(args.ltp ) # faster in GPU device
lowercase_ = BertTokenizer.from_pretrained(args.bert )
lowercase_ = prepare_ref(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
with open(args.save_path , """w""" , encoding="""utf-8""" ) as f:
lowercase_ = [json.dumps(__lowerCAmelCase ) + """\n""" for ref in ref_ids]
f.writelines(__lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase : List[Any] = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
)
parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")
UpperCAmelCase : int = parser.parse_args()
main(args)
| 313
| 1
|
"""simple docstring"""
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> bool:
'''simple docstring'''
return credit_card_number.startswith(("""34""", """35""", """37""", """4""", """5""", """6""") )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> bool:
'''simple docstring'''
lowercase_ = credit_card_number
lowercase_ = 0
lowercase_ = len(__lowerCAmelCase ) - 2
for i in range(__lowerCAmelCase , -1 , -2 ):
# double the value of every second digit
lowercase_ = int(cc_number[i] )
digit *= 2
        # If doubling a digit results in a two-digit number,
        # i.e. greater than 9 (e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6)
        # to get a single-digit number.
if digit > 9:
digit %= 10
digit += 1
lowercase_ = cc_number[:i] + str(__lowerCAmelCase ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(__lowerCAmelCase ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 10 == 0
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> bool:
'''simple docstring'''
lowercase_ = F'''{credit_card_number} is an invalid credit card number because'''
if not credit_card_number.isdigit():
print(F'''{error_message} it has nonnumerical characters.''' )
return False
if not 13 <= len(__lowerCAmelCase ) <= 16:
print(F'''{error_message} of its length.''' )
return False
if not validate_initial_digits(__lowerCAmelCase ):
print(F'''{error_message} of its first two digits.''' )
return False
if not luhn_validation(__lowerCAmelCase ):
print(F'''{error_message} it fails the Luhn check.''' )
return False
print(F'''{credit_card_number} is a valid credit card number.''' )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("4111111111111111")
validate_credit_card_number("32323")
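# --- illustrative sketch (not from the original file) ---
# A compact, self-contained Luhn check equivalent to luhn_validation above
# (`_luhn_ok` is a hypothetical helper name):
def _luhn_ok(number: str) -> bool:
    total = 0
    for i, ch in enumerate(reversed(number)):
        digit = int(ch)
        if i % 2 == 1:  # double every second digit from the right
            digit *= 2
            if digit > 9:
                digit -= 9  # same as summing the two digits of the product
        total += digit
    return total % 10 == 0

assert _luhn_ok("4111111111111111")  # a well-known valid test number
assert not _luhn_ok("4111111111111112")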
| 313
|
"""simple docstring"""
from __future__ import annotations
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> list[str]:
'''simple docstring'''
if nth_term == "":
return [""]
lowercase_ = int(__lowerCAmelCase )
lowercase_ = int(__lowerCAmelCase )
lowercase_ = []
for temp in range(int(__lowerCAmelCase ) ):
series.append(F'''1 / {pow(temp + 1 , int(__lowerCAmelCase ) )}''' if series else """1""" )
return series
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase : List[str] = int(input("Enter the last number (nth term) of the P-Series"))
UpperCAmelCase : Tuple = int(input("Enter the power for P-Series"))
print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
print(p_series(nth_term, power))
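# --- worked example (illustrative, not from the original file) ---
# p_series(5, 2) -> ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25'],
# i.e. the first five terms of sum 1 / n^2.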
| 313
| 1
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Any , lowerCAmelCase_ : int = 6):
"""simple docstring"""
lowercase_ = None
lowercase_ = None
self.create_linked_list(lowerCAmelCase_)
def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : int):
"""simple docstring"""
lowercase_ = Node()
lowercase_ = current_node
lowercase_ = current_node
lowercase_ = current_node
for _ in range(1 , lowerCAmelCase_):
lowercase_ = Node()
lowercase_ = current_node
lowercase_ = previous_node
lowercase_ = current_node
lowercase_ = self.front
lowercase_ = previous_node
def _UpperCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
return (
self.front == self.rear
and self.front is not None
and self.front.data is None
)
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
self.check_can_perform_operation()
return self.front.data if self.front else None
def _UpperCAmelCase ( self : int , lowerCAmelCase_ : Any):
"""simple docstring"""
if self.rear is None:
return
self.check_is_full()
if not self.is_empty():
lowercase_ = self.rear.next
if self.rear:
lowercase_ = data
def _UpperCAmelCase ( self : str):
"""simple docstring"""
self.check_can_perform_operation()
if self.rear is None or self.front is None:
return None
if self.front == self.rear:
lowercase_ = self.front.data
lowercase_ = None
return data
lowercase_ = self.front
lowercase_ = old_front.next
lowercase_ = old_front.data
lowercase_ = None
return data
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
if self.is_empty():
raise Exception("""Empty Queue""")
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
if self.rear and self.rear.next == self.front:
raise Exception("""Full Queue""")
class SCREAMING_SNAKE_CASE__ :
def __init__( self : List[str]):
"""simple docstring"""
lowercase_ = None
lowercase_ = None
lowercase_ = None
if __name__ == "__main__":
import doctest
doctest.testmod()
| 313
|
"""simple docstring"""
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def _UpperCAmelCase ( self : str):
"""simple docstring"""
lowercase_ = Vector([1, 2, 3])
self.assertEqual(x.component(0) , 1)
self.assertEqual(x.component(2) , 3)
lowercase_ = Vector()
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
lowercase_ = Vector([0, 0, 0, 0, 0, 1])
self.assertEqual(str(lowerCAmelCase_) , """(0,0,0,0,0,1)""")
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ = Vector([1, 2, 3, 4])
self.assertEqual(len(lowerCAmelCase_) , 4)
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
lowercase_ = Vector([1, 2])
lowercase_ = Vector([1, 2, 3, 4, 5])
lowercase_ = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
lowercase_ = Vector([1, -1, 1, -1, 2, -3, 4, -5])
self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3)
self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3)
self.assertEqual(z.euclidean_length() , 0)
self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = Vector([1, 2, 3])
lowercase_ = Vector([1, 1, 1])
self.assertEqual((x + y).component(0) , 2)
self.assertEqual((x + y).component(1) , 3)
self.assertEqual((x + y).component(2) , 4)
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
lowercase_ = Vector([1, 2, 3])
lowercase_ = Vector([1, 1, 1])
self.assertEqual((x - y).component(0) , 0)
self.assertEqual((x - y).component(1) , 1)
self.assertEqual((x - y).component(2) , 2)
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ = Vector([1, 2, 3])
lowercase_ = Vector([2, -1, 4]) # for test of dot product
lowercase_ = Vector([1, -2, -1])
self.assertEqual(str(x * 3.0) , """(3.0,6.0,9.0)""")
self.assertEqual((a * b) , 0)
def _UpperCAmelCase ( self : int):
"""simple docstring"""
self.assertEqual(str(zero_vector(1_0)).count("""0""") , 1_0)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
self.assertEqual(str(unit_basis_vector(3 , 1)) , """(0,1,0)""")
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
lowercase_ = Vector([1, 2, 3])
lowercase_ = Vector([1, 0, 1])
self.assertEqual(str(axpy(2 , lowerCAmelCase_ , lowerCAmelCase_)) , """(3,4,7)""")
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
lowercase_ = Vector([1, 0, 0, 0, 0, 0])
lowercase_ = x.copy()
self.assertEqual(str(lowerCAmelCase_) , str(lowerCAmelCase_))
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = Vector([1, 0, 0])
x.change_component(0 , 0)
x.change_component(1 , 1)
self.assertEqual(str(lowerCAmelCase_) , """(0,1,0)""")
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
self.assertEqual("""|1,2,3|\n|2,4,5|\n|6,7,8|\n""" , str(lowerCAmelCase_))
def _UpperCAmelCase ( self : str):
"""simple docstring"""
lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
lowercase_ = [[-3, -1_4, -1_0], [-5, -1_0, -5], [-2, -1, 0]]
for x in range(a.height()):
for y in range(a.width()):
self.assertEqual(minors[x][y] , a.minor(lowerCAmelCase_ , lowerCAmelCase_))
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
lowercase_ = [[-3, 1_4, -1_0], [5, -1_0, 5], [-2, 1, 0]]
for x in range(a.height()):
for y in range(a.width()):
self.assertEqual(cofactors[x][y] , a.cofactor(lowerCAmelCase_ , lowerCAmelCase_))
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
self.assertEqual(-5 , a.determinant())
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3)
lowercase_ = Vector([1, 2, 3])
self.assertEqual("""(14,32,50)""" , str(a * x))
self.assertEqual("""|2,4,6|\n|8,10,12|\n|14,16,18|\n""" , str(a * 2))
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
a.change_component(0 , 2 , 5)
self.assertEqual("""|1,2,5|\n|2,4,5|\n|6,7,8|\n""" , str(lowerCAmelCase_))
def _UpperCAmelCase ( self : str):
"""simple docstring"""
lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
self.assertEqual(7 , a.component(2 , 1) , 0.01)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
lowercase_ = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 1_0]] , 3 , 3)
self.assertEqual("""|2,4,10|\n|4,8,10|\n|12,14,18|\n""" , str(a + b))
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
lowercase_ = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 1_0]] , 3 , 3)
self.assertEqual("""|0,0,-4|\n|0,0,0|\n|0,0,-2|\n""" , str(a - b))
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
self.assertEqual(
"""|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n""" , str(square_zero_matrix(5)) , )
if __name__ == "__main__":
unittest.main()
| 313
| 1
|
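The determinant value -5 asserted in the test suite above can be re-derived independently. A minimal, self-contained cofactor-expansion sketch in plain Python (it does not use the Vector/Matrix module under test):

# Standalone check of the expected determinant -5 for the test matrix above.
def det(m):
    if len(m) == 1:
        return m[0][0]
    return sum(
        (-1) ** j * m[0][j] * det([row[:j] + row[j + 1 :] for row in m[1:]])
        for j in range(len(m))
    )

assert det([[1, 2, 3], [2, 4, 5], [6, 7, 8]]) == -5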
"""simple docstring"""
import pytest
import datasets
# Import fixture modules as plugins
UpperCAmelCase : int = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def pytest_collection_modifyitems(config, items):
    '''simple docstring'''
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["""integration""", """unit"""]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    '''simple docstring'''
    config.addinivalue_line("""markers""" , """torchaudio_latest: mark test to run with torchaudio>=0.12""")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    '''simple docstring'''
    test_hf_cache_home = tmp_path_factory.getbasetemp() / """cache"""
    test_hf_datasets_cache = test_hf_cache_home / """datasets"""
    test_hf_metrics_cache = test_hf_cache_home / """metrics"""
    test_hf_modules_cache = test_hf_cache_home / """modules"""
    monkeypatch.setattr("""datasets.config.HF_DATASETS_CACHE""" , str(test_hf_datasets_cache))
    monkeypatch.setattr("""datasets.config.HF_METRICS_CACHE""" , str(test_hf_metrics_cache))
    monkeypatch.setattr("""datasets.config.HF_MODULES_CACHE""" , str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / """downloads"""
    monkeypatch.setattr("""datasets.config.DOWNLOADED_DATASETS_PATH""" , str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / """downloads""" / """extracted"""
    monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(test_extracted_datasets_path))


@pytest.fixture(autouse=True , scope="""session""")
def disable_tqdm_output():
    '''simple docstring'''
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    '''simple docstring'''
    # don't take tests into account when counting downloads
    monkeypatch.setattr("""datasets.config.HF_UPDATE_DOWNLOAD_COUNTS""" , False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    '''simple docstring'''
    monkeypatch.setattr("""sqlalchemy.util.deprecations.SILENCE_UBER_WARNING""" , True)
| 313
|
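A minimal, generic sketch of the cache-isolation pattern used in the conftest above: an autouse fixture that redirects the datasets cache into pytest's temp directory. Here this is done via the HF_DATASETS_CACHE environment variable, a simplification of the setattr approach in the file:

import pytest

@pytest.fixture(autouse=True)
def isolated_cache(tmp_path_factory, monkeypatch):
    # Keep tests from touching the real user cache; the env-var route is a
    # lighter alternative to patching datasets.config attributes directly.
    cache_home = tmp_path_factory.getbasetemp() / "cache"
    monkeypatch.setenv("HF_DATASETS_CACHE", str(cache_home / "datasets"))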
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    '''simple docstring'''
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    '''simple docstring'''
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp

    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
    "No of Comparisons for 100 elements selected from a standard normal distribution "
    "is :"
)
print(z)
| 313
| 1
|
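A quick usage check for the quicksort above on a small list (the comparison count varies between runs because the pivot is chosen at random):

data = [3, 1, 4, 1, 5, 9, 2, 6]
comparisons = _in_place_quick_sort(data, 0, len(data) - 1)
print(data)         # [1, 1, 2, 3, 4, 5, 6, 9]
print(comparisons)  # varies with the random pivot choices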
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments:
    output_dir: str = field(
        metadata={"help": "The output directory where the model will be written."} , )
    encoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The encoder model checkpoint for weights initialization."
                "Don't set if you want to train an encoder model from scratch."
            )
        } , )
    decoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The decoder model checkpoint for weights initialization."
                "Don't set if you want to train a decoder model from scratch."
            )
        } , )
    encoder_config_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"} )
    decoder_config_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"} )


def main():
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments,) )
    (model_args,) = parser.parse_args_into_dataclasses()
    # Load pretrained model and tokenizer

    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name )
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path )

    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name )
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path )

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True

    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=encoder_config , decoder_config=decoder_config , )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id

    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path )

    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path )
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id )

    model.save_pretrained(model_args.output_dir )
    image_processor.save_pretrained(model_args.output_dir )
    tokenizer.save_pretrained(model_args.output_dir )
if __name__ == "__main__":
main()
| 313
|
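Assuming the script above is saved as create_model_from_encoder_decoder_models.py, an invocation would look like the following. The checkpoint names are illustrative examples, not requirements of the script:

# python create_model_from_encoder_decoder_models.py \
#     --output_dir ./vit-gpt2 \
#     --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#     --decoder_model_name_or_path gpt2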
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    '''simple docstring'''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''module.blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''module.blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""module.cls_token""", """vit.embeddings.cls_token"""),
("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""module.pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""module.norm.weight""", """layernorm.weight"""),
("""module.norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    '''simple docstring'''
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = """"""
        else:
            prefix = """vit."""
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''module.blocks.{i}.attn.qkv.weight''')
        in_proj_bias = state_dict.pop(f'''module.blocks.{i}.attn.qkv.bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    '''simple docstring'''
    ignore_keys = ["""head.weight""", """head.bias"""]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    '''simple docstring'''
    ignore_keys = [
"""module.fc.fc1.weight""",
"""module.fc.fc1.bias""",
"""module.fc.bn1.weight""",
"""module.fc.bn1.bias""",
"""module.fc.bn1.running_mean""",
"""module.fc.bn1.running_var""",
"""module.fc.bn1.num_batches_tracked""",
"""module.fc.fc2.weight""",
"""module.fc.fc2.bias""",
"""module.fc.bn2.weight""",
"""module.fc.bn2.bias""",
"""module.fc.bn2.running_mean""",
"""module.fc.bn2.running_var""",
"""module.fc.bn2.num_batches_tracked""",
"""module.fc.fc3.weight""",
"""module.fc.fc3.bias""",
]
for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    '''simple docstring'''
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    '''simple docstring'''
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = """datasets/huggingface/label-files"""
    filename = """imagenet-1k-id2label.json"""
    id2label = json.load(open(hf_hub_download(repo_id, filename), """r"""))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="""cpu""")["""target_encoder"""]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD)
    inputs = image_processor(images=image, return_tensors="""pt""")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1E-4)

    print(f'''Saving model to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)

    print(f'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCAmelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
UpperCAmelCase : Tuple = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 313
| 1
|
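The read_in_q_k_v step above slices one fused attention projection into separate query/key/value tensors. A minimal numpy illustration of that slicing with a toy hidden size:

import numpy as np

hidden_size = 4  # toy value; real ViT-MSN checkpoints use e.g. 384/768/1024
in_proj_weight = np.arange(3 * hidden_size * hidden_size).reshape(3 * hidden_size, hidden_size)

query_w = in_proj_weight[:hidden_size, :]
key_w = in_proj_weight[hidden_size : hidden_size * 2, :]
value_w = in_proj_weight[-hidden_size:, :]

assert query_w.shape == key_w.shape == value_w.shape == (hidden_size, hidden_size)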
"""simple docstring"""
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    '''simple docstring'''
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            """'table' has to be of square shaped array but got a """
            f'''{rows}x{columns} array:\n{table}'''
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("""No LU decomposition exists""")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
| 313
|
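A worked check of the Doolittle decomposition above: the lower factor is unit-triangular and the product of the factors reproduces the input.

import numpy as np

A = np.array([[2.0, 5.0, 3.0], [4.0, 11.0, 7.0], [6.0, 17.0, 13.0]])
L, U = lower_upper_decomposition(A)
assert np.allclose(L @ U, A)
assert np.allclose(np.diag(L), 1.0)  # Doolittle convention: ones on L's diagonal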
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
UpperCAmelCase : Tuple = logging.get_logger(__name__)
UpperCAmelCase : Optional[int] = {
"deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = "perceiver"
def __init__( self : Optional[int] , lowerCAmelCase_ : List[str]=2_5_6 , lowerCAmelCase_ : Dict=1_2_8_0 , lowerCAmelCase_ : List[Any]=7_6_8 , lowerCAmelCase_ : Optional[Any]=1 , lowerCAmelCase_ : List[Any]=2_6 , lowerCAmelCase_ : Optional[Any]=8 , lowerCAmelCase_ : Tuple=8 , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : Optional[Any]="kv" , lowerCAmelCase_ : Dict=1 , lowerCAmelCase_ : Optional[Any]=1 , lowerCAmelCase_ : List[str]="gelu" , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : Union[str, Any]=0.02 , lowerCAmelCase_ : List[Any]=1E-12 , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Union[str, Any]=2_6_2 , lowerCAmelCase_ : Union[str, Any]=2_0_4_8 , lowerCAmelCase_ : Any=5_6 , lowerCAmelCase_ : int=[3_6_8, 4_9_6] , lowerCAmelCase_ : Optional[int]=1_6 , lowerCAmelCase_ : Dict=1_9_2_0 , lowerCAmelCase_ : Optional[Any]=1_6 , lowerCAmelCase_ : Tuple=[1, 1_6, 2_2_4, 2_2_4] , **lowerCAmelCase_ : Union[str, Any] , ):
"""simple docstring"""
super().__init__(**lowerCAmelCase_)
lowercase_ = num_latents
lowercase_ = d_latents
lowercase_ = d_model
lowercase_ = num_blocks
lowercase_ = num_self_attends_per_block
lowercase_ = num_self_attention_heads
lowercase_ = num_cross_attention_heads
lowercase_ = qk_channels
lowercase_ = v_channels
lowercase_ = cross_attention_shape_for_attention
lowercase_ = self_attention_widening_factor
lowercase_ = cross_attention_widening_factor
lowercase_ = hidden_act
lowercase_ = attention_probs_dropout_prob
lowercase_ = initializer_range
lowercase_ = layer_norm_eps
lowercase_ = use_query_residual
# masked language modeling attributes
lowercase_ = vocab_size
lowercase_ = max_position_embeddings
# image classification attributes
lowercase_ = image_size
# flow attributes
lowercase_ = train_size
# multimodal autoencoding attributes
lowercase_ = num_frames
lowercase_ = audio_samples_per_frame
lowercase_ = samples_per_patch
lowercase_ = output_shape
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
@property
def _UpperCAmelCase ( self : str):
"""simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""inputs""", dynamic_axis),
("""attention_mask""", dynamic_axis),
])
@property
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
return 1E-4
    def generate_dummy_inputs( self , preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , batch_size: int = -1 , seq_length: int = -1 , num_choices: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , num_channels: int = 3 , image_width: int = 40 , image_height: int = 40 , ):
        """simple docstring"""
        if isinstance(preprocessor , PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0)
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=token_to_add)
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [""" """.join(["""a"""]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input , return_tensors=framework))
            inputs["""inputs"""] = inputs.pop("""input_ids""")
            return inputs
        elif isinstance(preprocessor , FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size , fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size , num_channels , image_height , image_width)
            inputs = dict(preprocessor(images=dummy_input , return_tensors=framework))
            inputs["""inputs"""] = inputs.pop("""pixel_values""")
            return inputs
        else:
            raise ValueError(
                """Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.""")
| 313
| 1
|
"""simple docstring"""
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    '''simple docstring'''
    for dir_path, dir_names, filenames in os.walk(top_dir):
        dir_names[:] = [d for d in dir_names if d != """scripts""" and d[0] not in """._"""]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path , filename).lstrip("""./""")


def md_prefix(i):
    '''simple docstring'''
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str , new_path: str) -> str:
    '''simple docstring'''
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    '''simple docstring'''
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path , filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(""" """ , """%20""")
        filename = os.path.splitext(filename.replace("""_""" , """ """).title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")
if __name__ == "__main__":
print_directory_md(".")
| 313
|
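The md_prefix helper above drives the layout of the generated markdown: depth 0 starts a new "##" section, and deeper levels emit indented bullets. With the definitions above in scope:

print(repr(md_prefix(0)))  # '\n##'   -> new section header
print(repr(md_prefix(1)))  # '  *'    -> first-level bullet
print(repr(md_prefix(2)))  # '    *'  -> nested bullet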
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , unittest.TestCase ):
lowercase__ = BarthezTokenizer
lowercase__ = BarthezTokenizerFast
lowercase__ = True
lowercase__ = True
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
super().setUp()
        tokenizer = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname , legacy_format=False)
        self.tokenizer = tokenizer
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
lowercase_ = """<pad>"""
lowercase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase_) , lowerCAmelCase_)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase_) , lowerCAmelCase_)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , """<s>""")
self.assertEqual(vocab_keys[1] , """<pad>""")
self.assertEqual(vocab_keys[-1] , """<mask>""")
self.assertEqual(len(lowerCAmelCase_) , 1_0_1_1_2_2)
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_1_1_2_2)
@require_torch
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
lowercase_ = [0, 5_7, 3_0_1_8, 7_0_3_0_7, 9_1, 2]
lowercase_ = self.tokenizer(
lowerCAmelCase_ , max_length=len(lowerCAmelCase_) , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors="""pt""")
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_)
self.assertEqual((2, 6) , batch.input_ids.shape)
self.assertEqual((2, 6) , batch.attention_mask.shape)
lowercase_ = batch.input_ids.tolist()[0]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_)
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
lowercase_ = self.get_tokenizer()
lowercase_ = self.get_rust_tokenizer()
lowercase_ = """I was born in 92000, and this is falsé."""
lowercase_ = tokenizer.tokenize(lowerCAmelCase_)
lowercase_ = rust_tokenizer.tokenize(lowerCAmelCase_)
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_)
lowercase_ = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_)
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = self.get_rust_tokenizer()
lowercase_ = tokenizer.encode(lowerCAmelCase_)
lowercase_ = rust_tokenizer.encode(lowerCAmelCase_)
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_)
@slow
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ = {"""input_ids""": [[0, 4_9_0, 1_4_3_2_8, 4_5_0_7, 3_5_4, 4_7, 4_3_6_6_9, 9_5, 2_5, 7_8_1_1_7, 2_0_2_1_5, 1_9_7_7_9, 1_9_0, 2_2, 4_0_0, 4, 3_5_3_4_3, 8_0_3_1_0, 6_0_3, 8_6, 2_4_9_3_7, 1_0_5, 3_3_4_3_8, 9_4_7_6_2, 1_9_6, 3_9_6_4_2, 7, 1_5, 1_5_9_3_3, 1_7_3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0_5_3_4, 8_7, 2_5, 6_6, 3_3_5_8, 1_9_6, 5_5_2_8_9, 8, 8_2_9_6_1, 8_1, 2_2_0_4, 7_5_2_0_3, 7, 1_5, 7_6_3, 1_2_9_5_6, 2_1_6, 1_7_8, 1_4_3_2_8, 9_5_9_5, 1_3_7_7, 6_9_6_9_3, 7, 4_4_8, 7_1_0_2_1, 1_9_6, 1_8_1_0_6, 1_4_3_7, 1_3_9_7_4, 1_0_8, 9_0_8_3, 4, 4_9_3_1_5, 7, 3_9, 8_6, 1_3_2_6, 2_7_9_3, 4_6_3_3_3, 4, 4_4_8, 1_9_6, 7_4_5_8_8, 7, 4_9_3_1_5, 7, 3_9, 2_1, 8_2_2, 3_8_4_7_0, 7_4, 2_1, 6_6_7_2_3, 6_2_4_8_0, 8, 2_2_0_5_0, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
lowercase_ = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase_ , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=lowerCAmelCase_ , )
| 313
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase : Optional[Any] = {"vocab_file": "sentencepiece.bpe.model"}
UpperCAmelCase : List[Any] = {
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
),
},
}
UpperCAmelCase : Tuple = {
"moussaKam/mbarthez": 1024,
"moussaKam/barthez": 1024,
"moussaKam/barthez-orangesum-title": 1024,
}
UpperCAmelCase : Optional[int] = "▁"
class BarthezTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ):
        """simple docstring"""
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False) if isinstance(mask_token , str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.fairseq_tokens_to_ids = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
        self.fairseq_tokens_to_ids["""<mask>"""] = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None):
        """simple docstring"""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False):
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size( self):
        """simple docstring"""
        return len(self.sp_model)

    def get_vocab( self):
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize( self , text: str) -> List[str]:
        """simple docstring"""
        return self.sp_model.encode(text , out_type=str)

    def _convert_token_to_id( self , token):
        """simple docstring"""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token( self , index):
        """simple docstring"""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string( self , tokens):
        """simple docstring"""
        current_sub_tokens = []
        out_string = """"""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def __getstate__( self):
        """simple docstring"""
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        return state

    def __setstate__( self , d):
        """simple docstring"""
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs"""):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None):
        """simple docstring"""
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file , out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file , """wb""") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 313
|
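The tokenizer above uses the CamemBERT-style special-token layout. A standalone sketch replicating build_inputs_with_special_tokens with the hardcoded fairseq ids from the file (<s> = 0, </s> = 2):

CLS_ID, SEP_ID = 0, 2  # <s> and </s>, per the fairseq_tokens_to_ids map above

def with_special_tokens(ids_a, ids_b=None):
    if ids_b is None:
        return [CLS_ID] + ids_a + [SEP_ID]
    return [CLS_ID] + ids_a + [SEP_ID, SEP_ID] + ids_b + [SEP_ID]

assert with_special_tokens([5, 6]) == [0, 5, 6, 2]
assert with_special_tokens([5, 6], [7]) == [0, 5, 6, 2, 2, 7, 2]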
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCAmelCase : Optional[Any] = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class SCREAMING_SNAKE_CASE__ :
lowercase__ = PegasusConfig
lowercase__ = {}
lowercase__ = "gelu"
def __init__( self : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any]=1_3 , lowerCAmelCase_ : Any=7 , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : Tuple=False , lowerCAmelCase_ : str=9_9 , lowerCAmelCase_ : Tuple=3_2 , lowerCAmelCase_ : Dict=5 , lowerCAmelCase_ : Union[str, Any]=4 , lowerCAmelCase_ : Dict=3_7 , lowerCAmelCase_ : Any=0.1 , lowerCAmelCase_ : Any=0.1 , lowerCAmelCase_ : Optional[int]=2_0 , lowerCAmelCase_ : Tuple=2 , lowerCAmelCase_ : List[str]=1 , lowerCAmelCase_ : Optional[Any]=0 , ):
"""simple docstring"""
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = seq_length
lowercase_ = is_training
lowercase_ = use_labels
lowercase_ = vocab_size
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = max_position_embeddings
lowercase_ = eos_token_id
lowercase_ = pad_token_id
lowercase_ = bos_token_id
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
lowercase_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size).clip(3 , self.vocab_size)
lowercase_ = np.expand_dims(np.array([self.eos_token_id] * self.batch_size) , 1)
lowercase_ = np.concatenate([input_ids, eos_tensor] , axis=1)
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
lowercase_ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
lowercase_ = prepare_pegasus_inputs_dict(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
return config, inputs_dict
def _UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[Any]):
"""simple docstring"""
lowercase_ = 2_0
lowercase_ = model_class_name(lowerCAmelCase_)
lowercase_ = model.encode(inputs_dict["""input_ids"""])
lowercase_ , lowercase_ = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
lowercase_ = model.init_cache(decoder_input_ids.shape[0] , lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""")
lowercase_ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowercase_ = model.decode(
decoder_input_ids[:, :-1] , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ , decoder_position_ids=lowerCAmelCase_ , )
lowercase_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""")
lowercase_ = model.decode(
decoder_input_ids[:, -1:] , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowerCAmelCase_ , )
lowercase_ = model.decode(lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''')
def _UpperCAmelCase ( self : Optional[int] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict):
"""simple docstring"""
lowercase_ = 2_0
lowercase_ = model_class_name(lowerCAmelCase_)
lowercase_ = model.encode(inputs_dict["""input_ids"""])
lowercase_ , lowercase_ = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
lowercase_ = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
] , axis=-1 , )
lowercase_ = model.init_cache(decoder_input_ids.shape[0] , lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowercase_ = model.decode(
decoder_input_ids[:, :-1] , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ , decoder_position_ids=lowerCAmelCase_ , )
lowercase_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""")
lowercase_ = model.decode(
decoder_input_ids[:, -1:] , lowerCAmelCase_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowerCAmelCase_ , decoder_position_ids=lowerCAmelCase_ , )
lowercase_ = model.decode(lowerCAmelCase_ , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_)
lowercase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''')
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , ) -> Optional[Any]:
'''simple docstring'''
if attention_mask is None:
lowercase_ = np.not_equal(__lowerCAmelCase , config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
lowercase_ = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , unittest.TestCase ):
lowercase__ = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
lowercase__ = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
lowercase__ = True
lowercase__ = False
lowercase__ = False
lowercase__ = False
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
lowercase_ = FlaxPegasusModelTester(self)
lowercase_ = ConfigTester(self , config_class=lowerCAmelCase_)
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
lowercase_ = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = model_class(lowerCAmelCase_)
@jax.jit
def encode_jitted(lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int]=None , **lowerCAmelCase_ : Optional[int]):
return model.encode(input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_)
with self.subTest("""JIT Enabled"""):
lowercase_ = encode_jitted(**lowerCAmelCase_).to_tuple()
with self.subTest("""JIT Disabled"""):
with jax.disable_jit():
lowercase_ = encode_jitted(**lowerCAmelCase_).to_tuple()
self.assertEqual(len(lowerCAmelCase_) , len(lowerCAmelCase_))
for jitted_output, output in zip(lowerCAmelCase_ , lowerCAmelCase_):
self.assertEqual(jitted_output.shape , output.shape)
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
lowercase_ = model_class(lowerCAmelCase_)
lowercase_ = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""])
lowercase_ = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict):
return model.decode(
decoder_input_ids=lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , encoder_outputs=lowerCAmelCase_ , )
with self.subTest("""JIT Enabled"""):
lowercase_ = decode_jitted(**lowerCAmelCase_).to_tuple()
with self.subTest("""JIT Disabled"""):
with jax.disable_jit():
lowercase_ = decode_jitted(**lowerCAmelCase_).to_tuple()
self.assertEqual(len(lowerCAmelCase_) , len(lowerCAmelCase_))
for jitted_output, output in zip(lowerCAmelCase_ , lowerCAmelCase_):
self.assertEqual(jitted_output.shape , output.shape)
@slow
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
for model_class_name in self.all_model_classes:
lowercase_ = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=lowerCAmelCase_)
lowercase_ = np.ones((1, 1))
lowercase_ = model(lowerCAmelCase_)
self.assertIsNotNone(lowerCAmelCase_)
@slow
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
lowercase_ = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""")
lowercase_ = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""")
lowercase_ = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
lowercase_ = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
lowercase_ = tokenizer(lowerCAmelCase_ , return_tensors="""np""" , truncation=lowerCAmelCase_ , max_length=5_1_2 , padding=lowerCAmelCase_)
lowercase_ = model.generate(**lowerCAmelCase_ , num_beams=2).sequences
lowercase_ = tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_)
assert tgt_text == decoded
| 313
| 1
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    '''simple docstring'''
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime , None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1E10) -> int:
    '''simple docstring'''
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2
if __name__ == "__main__":
print(solution())
| 313
|
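The generator above is an incremental (unbounded) Sieve of Eratosthenes: each prime schedules its next composite in factor_map, so no upper bound is needed. A quick check of its first outputs:

from itertools import islice

assert list(islice(sieve(), 10)) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]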
"""simple docstring"""
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class SCREAMING_SNAKE_CASE__ :
lowercase__ = None
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = None
lowercase__ = None
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = True
lowercase__ = None
lowercase__ = 1
lowercase__ = None
lowercase__ = False
lowercase__ = None
lowercase__ = None
    def copy( self):
        """simple docstring"""
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
| 313
| 1
|
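The copy() method above rebuilds the dataclass from a deep copy of its __dict__, so nested mutables do not leak between the original and the copy. A minimal standalone illustration with a hypothetical two-field config:

import copy
from dataclasses import dataclass, field

@dataclass
class MiniConfig:
    retries: int = 1
    headers: dict = field(default_factory=dict)

    def copy(self) -> "MiniConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})

a = MiniConfig(headers={"x": "1"})
b = a.copy()
b.headers["x"] = "2"
assert a.headers["x"] == "1"  # deep copy: the original is untouched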
"""simple docstring"""
def odd_even_transposition(arr: list) -> list:
    '''simple docstring'''
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2 , arr_size - 1 , 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(F"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
| 313
|
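Odd-even transposition sort alternates compare-swap passes over even- and odd-indexed pairs; n passes suffice for n elements. Two quick checks against the implementation above:

assert odd_even_transposition([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert odd_even_transposition([2, 1, 2, 1, 0]) == [0, 1, 1, 2, 2]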
"""simple docstring"""
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
UpperCAmelCase : Any = logging.get_logger(__name__)
UpperCAmelCase : Dict = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase : List[Any] = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
UpperCAmelCase : Union[str, Any] = {
"allenai/led-base-16384": 1_6384,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        """simple docstring"""
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("""add_prefix_space""" , add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("""type"""))
            pre_tok_state["""add_prefix_space"""] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = """post_processor"""
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state["""sep"""] = tuple(state["""sep"""])
            if "cls" in state:
                state["""cls"""] = tuple(state["""cls"""])

            changes_to_apply = False
            if state.get("""add_prefix_space""" , add_prefix_space) != add_prefix_space:
                state["""add_prefix_space"""] = add_prefix_space
                changes_to_apply = True
            if state.get("""trim_offsets""" , trim_offsets) != trim_offsets:
                state["""trim_offsets"""] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors , state.pop("""type"""))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer , tokenizer_component , new_value)
    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token( self) -> str:
        """simple docstring"""
        if self._mask_token is None:
            if self.verbose:
                logger.error("""Using mask_token, but it is not set yet.""")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token( self , value):
        """simple docstring"""
        value = AddedToken(value , lstrip=True , rstrip=False) if isinstance(value , str) else value
        self._mask_token = value
    def _batch_encode_plus( self , *args , **kwargs) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get("""is_split_into_words""" , False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                """to use it with pretokenized inputs.""")
        return super()._batch_encode_plus(*args , **kwargs)
    def _encode_plus( self , *args , **kwargs) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get("""is_split_into_words""" , False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                """to use it with pretokenized inputs.""")
        return super()._encode_plus(*args , **kwargs)
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None):
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None):
        """simple docstring"""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _pad( self , encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding] , max_length: Optional[int] = None , padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD , pad_to_multiple_of: Optional[int] = None , return_attention_mask: Optional[bool] = None , ) -> dict:
        """simple docstring"""
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = """attention_mask""" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["""global_attention_mask"""]) != len(required_input)
            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["""global_attention_mask"""])
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["""global_attention_mask"""] = (
                        encoded_inputs["""global_attention_mask"""] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["""global_attention_mask"""] = [-1] * difference + encoded_inputs[
                        """global_attention_mask"""
                    ]
                else:
                    raise ValueError("""Invalid padding strategy:""" + str(self.padding_side))
return encoded_inputs
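# --- Hedged sketch (not part of the tokenizer API): the global-attention
# padding step above, isolated on plain Python lists. `-1` marks padded
# positions because `0` already means "local attention" and cannot double
# as a padding value. The helper name below is hypothetical.
def pad_global_attention_mask(mask: list, target_length: int, padding_side: str = "right") -> list:
    difference = target_length - len(mask)
    if difference <= 0:
        return mask
    if padding_side == "right":
        return mask + [-1] * difference
    if padding_side == "left":
        return [-1] * difference + mask
    raise ValueError("Invalid padding strategy:" + str(padding_side))
assert pad_global_attention_mask([1, 0, 0], 5) == [1, 0, 0, -1, -1]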
| 313
| 1
|
"""simple docstring"""
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    '''Return the squared second norm of vector: norm_squared(v) = sum(x * x for x in v)'''
    return np.dot(vector, vector)
class SVC:
    '''Support Vector Classifier (identifiers restored so the file actually runs)'''
    def __init__(self, *, regularization: float = np.inf, kernel: str = "linear", gamma: float = 0.0):
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1 / (n_features * X.var())  (wiki)
            # previously it was 1 / n_features
        else:
            msg = f'Unknown kernel: {kernel}'
            raise ValueError(msg)
    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        '''Linear kernel: the inner product of the two vectors'''
        return np.dot(vector1, vector2)
    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        '''RBF kernel: exp(-gamma * ||v1 - v2||^2)'''
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))
    def fit(self, observations: list[ndarray], classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes
        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        #     constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        #     constraint: self.C >= ln >= 0
        #            and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations
        (n,) = np.shape(classes)
        def to_minimize(candidate: ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)
        ly_contraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)
        l_star = minimize(
            to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_contraint]
        ).x
        self.optimum = l_star
        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i], observations[j]
                )
        self.offset = s / n
    def predict(self, observation: ndarray) -> int:
        '''Classify `observation`: returns 1 or -1 depending on its side of the plane'''
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
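# --- Hedged usage sketch for the SVC class above: fit a linear kernel on a
# tiny, linearly separable toy problem, then classify new points. The data
# and expected outputs are illustrative, not taken from the original file.
import numpy as np
observations = [
    np.asarray([0.0, 1.0]),
    np.asarray([0.0, 2.0]),
    np.asarray([3.0, 1.0]),
    np.asarray([3.0, 2.0]),
]
classes = np.asarray([1, 1, -1, -1])
svc = SVC(kernel="linear", regularization=10.0)
svc.fit(observations, classes)
print(svc.predict(np.asarray([0.5, 1.5])))  # expected: 1 (near the first cluster)
print(svc.predict(np.asarray([2.5, 1.5])))  # expected: -1 (near the second cluster)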
| 313
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase : Union[str, Any] = {
"configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : int = [
"UperNetForSemanticSegmentation",
"UperNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
UpperCAmelCase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
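# --- Hedged sketch of the lazy-import pattern _LazyModule implements above:
# heavy submodules load only when an attribute is first accessed. This is a
# simplified module-level __getattr__ (PEP 562), not transformers' actual code.
import importlib
_lazy_structure = {"configuration_upernet": ["UperNetConfig"]}
def __getattr__(name):
    for module_name, symbols in _lazy_structure.items():
        if name in symbols:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")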
| 313
| 1
|
"""simple docstring"""
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
UpperCAmelCase : int = True
from torch.cuda.amp import autocast
UpperCAmelCase : Optional[Any] = logging.getLogger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE__ :
lowercase__ = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
lowercase__ = field(
default=__UpperCAmelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
lowercase__ = field(
default=__UpperCAmelCase , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
lowercase__ = field(
default=__UpperCAmelCase , metadata={"help": "Whether to log verbose messages or not."} , )
lowercase__ = field(
default=2.0 , metadata={"help": "Maximum temperature for gumbel softmax."} )
lowercase__ = field(
default=0.5 , metadata={"help": "Minimum temperature for gumbel softmax."} )
lowercase__ = field(
default=0.99_99_95 , metadata={"help": "Decay of gumbel temperature during training."} )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> str:
'''simple docstring'''
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
lowercase_ = logging.WARNING
if model_args.verbose_logging:
lowercase_ = logging.DEBUG
elif trainer_utils.is_main_process(training_args.local_rank ):
lowercase_ = logging.INFO
logger.setLevel(__lowerCAmelCase )
@dataclass
class SCREAMING_SNAKE_CASE__ :
lowercase__ = field(
default=__UpperCAmelCase , metadata={"help": "The name of the dataset to use (via the datasets library)."} )
lowercase__ = field(
default=__UpperCAmelCase , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
lowercase__ = field(
default="train" , metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
} , )
lowercase__ = field(
default="validation" , metadata={
"help": (
"The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
)
} , )
lowercase__ = field(
default="file" , metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"} , )
lowercase__ = field(
default=__UpperCAmelCase , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
lowercase__ = field(
default=1 , metadata={
"help": "The percentage of the train set used as validation set in case there's no validation split"
} , )
lowercase__ = field(
default=__UpperCAmelCase , metadata={"help": "The number of processes to use for the preprocessing."} , )
lowercase__ = field(
default=20.0 , metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"} )
@dataclass
class SCREAMING_SNAKE_CASE__ :
lowercase__ = 42
lowercase__ = 42
lowercase__ = "longest"
lowercase__ = None
lowercase__ = None
def __call__( self : Optional[int] , lowerCAmelCase_ : List[Dict[str, Union[List[int], torch.Tensor]]]):
"""simple docstring"""
lowercase_ = self.feature_extractor.pad(
lowerCAmelCase_ , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , )
lowercase_ = self.model._get_feat_extract_output_lengths(batch["""input_values"""].shape[-1])
lowercase_ = batch["""input_values"""].shape[0]
# make sure that no loss is computed on padded inputs
if batch["attention_mask"] is not None:
# compute real output lengths according to convolution formula
lowercase_ = self.model._get_feat_extract_output_lengths(batch["""attention_mask"""].sum(-1)).to(
torch.long)
lowercase_ = torch.zeros(
(batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch["""input_values"""].device)
# these two operations make sure that all values
# before the output-length indices are attended to
lowercase_ = 1
lowercase_ = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
# sample randomly masked indices
lowercase_ = _compute_mask_indices(
(batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=lowerCAmelCase_ , min_masks=2 , )
return batch
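# --- Standalone sketch of the flip/cumsum/flip trick used above: writing a 1
# at each sequence's last valid output index and cumulatively summing from the
# right expands it into a full attention mask. Shapes here are illustrative.
import torch
output_lengths = torch.tensor([3, 5])           # valid output length per sequence
mask = torch.zeros((2, 6), dtype=torch.long)
mask[torch.arange(2), output_lengths - 1] = 1   # mark the final valid positions
mask = mask.flip([-1]).cumsum(-1).flip([-1]).bool()
# mask -> [[True, True, True, False, False, False],
#          [True, True, True, True,  True,  False]]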
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
def __init__( self : int , *lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str]=1 , lowerCAmelCase_ : Dict=0 , lowerCAmelCase_ : str=1.0 , **lowerCAmelCase_ : Optional[Any]):
"""simple docstring"""
super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_)
lowercase_ = 0
lowercase_ = max_gumbel_temp
lowercase_ = min_gumbel_temp
lowercase_ = gumbel_temp_decay
def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : nn.Module , lowerCAmelCase_ : Dict[str, Union[torch.Tensor, Any]]):
"""simple docstring"""
model.train()
lowercase_ = self._prepare_inputs(lowerCAmelCase_)
if self.use_amp:
with autocast():
lowercase_ = self.compute_loss(lowerCAmelCase_ , lowerCAmelCase_)
else:
lowercase_ = self.compute_loss(lowerCAmelCase_ , lowerCAmelCase_)
if self.args.n_gpu > 1 or self.deepspeed:
if model.module.config.ctc_loss_reduction == "mean":
lowercase_ = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
lowercase_ = loss.sum() / (inputs["""mask_time_indices"""]).sum()
else:
raise ValueError(F'''{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']''')
if self.args.gradient_accumulation_steps > 1:
lowercase_ = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(lowerCAmelCase_).backward()
elif self.use_apex:
with amp.scale_loss(lowerCAmelCase_ , self.optimizer) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(lowerCAmelCase_)
else:
loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp))
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp))
return loss.detach()
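# --- Sketch of the gumbel-temperature schedule applied above: exponential
# decay from max_gumbel_temp, floored at min_gumbel_temp. The defaults mirror
# the ModelArguments defaults; the function name itself is hypothetical.
def gumbel_temperature(step: int, max_temp: float = 2.0, min_temp: float = 0.5, decay: float = 0.999995) -> float:
    return max(max_temp * decay**step, min_temp)
assert gumbel_temperature(0) == 2.0
assert gumbel_temperature(10**9) == 0.5  # the floor kicks in eventually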
def _SCREAMING_SNAKE_CASE () -> List[str]:
'''simple docstring'''
lowercase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
lowercase_ , lowercase_ , lowercase_ = parser.parse_args_into_dataclasses()
configure_logger(__lowerCAmelCase , __lowerCAmelCase )
# Downloading and loading a dataset from the hub.
lowercase_ = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
if "validation" not in datasets.keys():
# make sure only "validation" and "train" keys remain
lowercase_ = DatasetDict()
lowercase_ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}[:{data_args.validation_split_percentage}%]''' , cache_dir=model_args.cache_dir , )
lowercase_ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}[{data_args.validation_split_percentage}%:]''' , cache_dir=model_args.cache_dir , )
else:
# make sure only "validation" and "train" keys remain
lowercase_ = DatasetDict()
lowercase_ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split="""validation""" , cache_dir=model_args.cache_dir , )
lowercase_ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}''' , cache_dir=model_args.cache_dir , )
# only normalized-inputs-training is supported
lowercase_ = WavaVecaFeatureExtractor.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=__lowerCAmelCase )
def prepare_dataset(__lowerCAmelCase ):
# check that all files have the correct sampling rate
lowercase_ , lowercase_ = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
return batch
# load audio files into numpy arrays
lowercase_ = datasets.map(
__lowerCAmelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets["""train"""].column_names )
# filter audio files that are too long
lowercase_ = vectorized_datasets.filter(
lambda __lowerCAmelCase : len(data["""speech"""] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
def normalize(__lowerCAmelCase ):
return feature_extractor(batch["""speech"""] , sampling_rate=feature_extractor.sampling_rate )
# normalize and transform to `BatchFeatures`
lowercase_ = vectorized_datasets.map(
__lowerCAmelCase , batched=__lowerCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets["""train"""].column_names , )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
lowercase_ = WavaVecaConfig.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
raise ValueError(
"""PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"""
""" ``config.feat_extract_norm='layer'""" )
lowercase_ = WavaVecaForPreTraining(__lowerCAmelCase )
lowercase_ = DataCollatorForWavaVecaPretraining(model=__lowerCAmelCase , feature_extractor=__lowerCAmelCase )
lowercase_ = WavaVecaPreTrainer(
model=__lowerCAmelCase , data_collator=__lowerCAmelCase , args=__lowerCAmelCase , train_dataset=vectorized_datasets["""train"""] , eval_dataset=vectorized_datasets["""validation"""] , tokenizer=__lowerCAmelCase , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
trainer.train()
if __name__ == "__main__":
main()
| 313
|
"""simple docstring"""
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any]=1_3 , lowerCAmelCase_ : Optional[Any]=7 , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : int=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : Tuple=9_9 , lowerCAmelCase_ : List[str]=6_4 , lowerCAmelCase_ : Optional[int]=3_2 , lowerCAmelCase_ : Optional[Any]=5 , lowerCAmelCase_ : Optional[int]=4 , lowerCAmelCase_ : str=3_7 , lowerCAmelCase_ : Any="gelu" , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : Optional[int]=0.1 , lowerCAmelCase_ : int=5_1_2 , lowerCAmelCase_ : Optional[int]=1_6 , lowerCAmelCase_ : List[str]=2 , lowerCAmelCase_ : List[Any]=0.02 , lowerCAmelCase_ : Optional[int]=3 , lowerCAmelCase_ : Optional[Any]=4 , lowerCAmelCase_ : Dict=None , ):
"""simple docstring"""
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = seq_length
lowercase_ = is_training
lowercase_ = use_input_mask
lowercase_ = use_token_type_ids
lowercase_ = use_labels
lowercase_ = vocab_size
lowercase_ = hidden_size
lowercase_ = embedding_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_act
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = max_position_embeddings
lowercase_ = type_vocab_size
lowercase_ = type_sequence_label_size
lowercase_ = initializer_range
lowercase_ = num_labels
lowercase_ = num_choices
lowercase_ = scope
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
lowercase_ = None
if self.use_input_mask:
lowercase_ = random_attention_mask([self.batch_size, self.seq_length])
lowercase_ = None
if self.use_token_type_ids:
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
lowercase_ = None
lowercase_ = None
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
lowercase_ = ids_tensor([self.batch_size] , self.num_choices)
lowercase_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , )
def _UpperCAmelCase ( self : Dict , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict):
"""simple docstring"""
lowercase_ = MegatronBertModel(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_)
lowercase_ = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_)
lowercase_ = model(lowerCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str]):
"""simple docstring"""
lowercase_ = MegatronBertForMaskedLM(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _UpperCAmelCase ( self : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int]):
"""simple docstring"""
lowercase_ = MegatronBertForCausalLM(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any]):
"""simple docstring"""
lowercase_ = MegatronBertForNextSentencePrediction(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2))
def _UpperCAmelCase ( self : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any]):
"""simple docstring"""
lowercase_ = MegatronBertForPreTraining(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , next_sentence_label=lowerCAmelCase_ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2))
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple):
"""simple docstring"""
lowercase_ = MegatronBertForQuestionAnswering(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _UpperCAmelCase ( self : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : int , lowerCAmelCase_ : List[str]):
"""simple docstring"""
lowercase_ = self.num_labels
lowercase_ = MegatronBertForSequenceClassification(lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _UpperCAmelCase ( self : List[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict):
"""simple docstring"""
lowercase_ = self.num_labels
lowercase_ = MegatronBertForTokenClassification(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _UpperCAmelCase ( self : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any]):
"""simple docstring"""
lowercase_ = self.num_choices
lowercase_ = MegatronBertForMultipleChoice(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
lowercase_ = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
lowercase_ = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
lowercase_ = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
(
    config,
    input_ids,
    token_type_ids,
    input_mask,
    sequence_labels,
    token_labels,
    choice_labels,
) = config_and_inputs
inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
lowercase__ = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
lowercase__ = (
{
"feature-extraction": MegatronBertModel,
"fill-mask": MegatronBertForMaskedLM,
"question-answering": MegatronBertForQuestionAnswering,
"text-classification": MegatronBertForSequenceClassification,
"text-generation": MegatronBertForCausalLM,
"token-classification": MegatronBertForTokenClassification,
"zero-shot": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase__ = True
# test_resize_embeddings = False
lowercase__ = False
def _UpperCAmelCase ( self : Any , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any]=False):
"""simple docstring"""
lowercase_ = super()._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_)
if return_labels:
if model_class in get_values(lowerCAmelCase_):
lowercase_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=lowerCAmelCase_)
lowercase_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_)
return inputs_dict
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ = MegatronBertModelTester(self)
lowercase_ = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=3_7)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*lowerCAmelCase_)
def _UpperCAmelCase ( self : str):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*lowerCAmelCase_)
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*lowerCAmelCase_)
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*lowerCAmelCase_)
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*lowerCAmelCase_)
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*lowerCAmelCase_)
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*lowerCAmelCase_)
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*lowerCAmelCase_)
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> int:
'''simple docstring'''
return torch.tensor(
__lowerCAmelCase , dtype=torch.long , device=__lowerCAmelCase , )
UpperCAmelCase : Any = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
@unittest.skip("""Model is not available.""")
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ = """nvidia/megatron-bert-uncased-345m"""
if "MYDIR" in os.environ:
lowercase_ = os.path.join(os.environ["""MYDIR"""] , lowerCAmelCase_)
lowercase_ = MegatronBertModel.from_pretrained(lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.half()
lowercase_ = _long_tensor([[1_0_1, 7_1_1_0, 1_0_0_5, 1_0_5_6, 2_0_2_3, 1_1_3_3_3, 1_7_4_1_3, 1_0_2_9, 1_0_2]])
with torch.no_grad():
lowercase_ = model(lowerCAmelCase_)[0]
lowercase_ = torch.Size((1, 9, 1_0_2_4))
self.assertEqual(output.shape , lowerCAmelCase_)
lowercase_ = [-0.6_040, -0.2_517, -0.1_025, 0.3_420, -0.6_758, -0.0_017, -0.1_089, -0.1_990, 0.5_728]
for ii in range(3):
for jj in range(3):
lowercase_ = output[0, ii, jj]
lowercase_ = expected[3 * ii + jj]
lowercase_ = """ii={} jj={} a={} b={}""".format(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
self.assertTrue(math.isclose(lowerCAmelCase_ , lowerCAmelCase_ , rel_tol=lowerCAmelCase_ , abs_tol=lowerCAmelCase_) , msg=lowerCAmelCase_)
| 313
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase : Dict = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = "blenderbot-small"
lowercase__ = ["past_key_values"]
lowercase__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : Optional[int] , lowerCAmelCase_ : List[Any]=5_0_2_6_5 , lowerCAmelCase_ : Union[str, Any]=5_1_2 , lowerCAmelCase_ : Tuple=8 , lowerCAmelCase_ : Optional[int]=2_0_4_8 , lowerCAmelCase_ : List[Any]=1_6 , lowerCAmelCase_ : List[str]=8 , lowerCAmelCase_ : Optional[Any]=2_0_4_8 , lowerCAmelCase_ : Union[str, Any]=1_6 , lowerCAmelCase_ : Tuple=0.0 , lowerCAmelCase_ : Dict=0.0 , lowerCAmelCase_ : str=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : List[Any]="gelu" , lowerCAmelCase_ : Dict=5_1_2 , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : Tuple=0.0 , lowerCAmelCase_ : List[Any]=0.0 , lowerCAmelCase_ : List[str]=0.02 , lowerCAmelCase_ : Optional[int]=1 , lowerCAmelCase_ : List[Any]=False , lowerCAmelCase_ : Any=0 , lowerCAmelCase_ : int=1 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : int=2 , **lowerCAmelCase_ : Tuple , ):
"""simple docstring"""
lowercase_ = vocab_size
lowercase_ = max_position_embeddings
lowercase_ = d_model
lowercase_ = encoder_ffn_dim
lowercase_ = encoder_layers
lowercase_ = encoder_attention_heads
lowercase_ = decoder_ffn_dim
lowercase_ = decoder_layers
lowercase_ = decoder_attention_heads
lowercase_ = dropout
lowercase_ = attention_dropout
lowercase_ = activation_dropout
lowercase_ = activation_function
lowercase_ = init_std
lowercase_ = encoder_layerdrop
lowercase_ = decoder_layerdrop
lowercase_ = use_cache
lowercase_ = encoder_layers
lowercase_ = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , is_encoder_decoder=lowerCAmelCase_ , decoder_start_token_id=lowerCAmelCase_ , forced_eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_ , )
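# --- Minimal sketch of the attribute_map aliasing declared above: reading
# config.num_attention_heads transparently returns config.encoder_attention_heads.
# PretrainedConfig implements the real mechanism; this stand-in only shows the idea.
class AliasedConfig:
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(self):
        self.encoder_attention_heads = 16
        self.d_model = 512
    def __getattr__(self, name):
        if name in type(self).attribute_map:
            return getattr(self, type(self).attribute_map[name])
        raise AttributeError(name)
cfg = AliasedConfig()
assert cfg.num_attention_heads == 16 and cfg.hidden_size == 512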
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
@property
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
lowercase_ = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
])
if self.use_past:
lowercase_ = {0: """batch"""}
lowercase_ = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
lowercase_ = {0: """batch""", 1: """decoder_sequence"""}
lowercase_ = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase_ , direction="""inputs""")
elif self.task == "causal-lm":
# TODO: figure this case out.
lowercase_ = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
])
if self.use_past:
lowercase_ , lowercase_ = self.num_layers
for i in range(lowerCAmelCase_):
lowercase_ = {0: """batch""", 2: """past_sequence + sequence"""}
lowercase_ = {0: """batch""", 2: """past_sequence + sequence"""}
else:
lowercase_ = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
])
return common_inputs
@property
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
lowercase_ = super().outputs
else:
lowercase_ = super(lowerCAmelCase_ , self).outputs
if self.use_past:
lowercase_ , lowercase_ = self.num_layers
for i in range(lowerCAmelCase_):
lowercase_ = {0: """batch""", 2: """past_sequence + sequence"""}
lowercase_ = {0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def _UpperCAmelCase ( self : Dict , lowerCAmelCase_ : PreTrainedTokenizer , lowerCAmelCase_ : int = -1 , lowerCAmelCase_ : int = -1 , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : Optional[TensorType] = None , ):
"""simple docstring"""
lowercase_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
# Generate decoder inputs
lowercase_ = seq_length if not self.use_past else 1
lowercase_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = {F'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
lowercase_ = dict(**lowerCAmelCase_ , **lowerCAmelCase_)
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""")
else:
import torch
lowercase_ , lowercase_ = common_inputs["""input_ids"""].shape
lowercase_ = common_inputs["""decoder_input_ids"""].shape[1]
lowercase_ , lowercase_ = self.num_attention_heads
lowercase_ = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowercase_ = decoder_seq_length + 3
lowercase_ = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
lowercase_ = torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(lowerCAmelCase_ , lowerCAmelCase_)] , dim=1)
lowercase_ = []
# If the numbers of encoder and decoder layers are present in the model configuration, both are considered
lowercase_ , lowercase_ = self.num_layers
lowercase_ = min(lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = max(lowerCAmelCase_ , lowerCAmelCase_) - min_num_layers
lowercase_ = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(lowerCAmelCase_):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCAmelCase_),
torch.zeros(lowerCAmelCase_),
torch.zeros(lowerCAmelCase_),
torch.zeros(lowerCAmelCase_),
))
# TODO: test this.
lowercase_ = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(lowerCAmelCase_ , lowerCAmelCase_):
common_inputs["past_key_values"].append((torch.zeros(lowerCAmelCase_), torch.zeros(lowerCAmelCase_)))
return common_inputs
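# --- Sketch of the past_key_values geometry assembled above: one (key, value)
# pair per decoder layer, each of shape
# (batch, num_heads, past_sequence_length, hidden_size // num_heads).
# The concrete numbers below are illustrative only.
import torch
batch, num_heads, past_len, hidden = 2, 16, 10, 512
head_dim = hidden // num_heads
past_key_values = [
    (torch.zeros(batch, num_heads, past_len, head_dim),
     torch.zeros(batch, num_heads, past_len, head_dim))
    for _ in range(8)  # one entry per decoder layer
]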
def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : PreTrainedTokenizer , lowerCAmelCase_ : int = -1 , lowerCAmelCase_ : int = -1 , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : Optional[TensorType] = None , ):
"""simple docstring"""
lowercase_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""")
else:
import torch
lowercase_ , lowercase_ = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
lowercase_ = seqlen + 2
lowercase_ , lowercase_ = self.num_layers
lowercase_ , lowercase_ = self.num_attention_heads
lowercase_ = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowercase_ = common_inputs["""attention_mask"""].dtype
lowercase_ = torch.cat(
[common_inputs["""attention_mask"""], torch.ones(lowerCAmelCase_ , lowerCAmelCase_ , dtype=lowerCAmelCase_)] , dim=1)
lowercase_ = [
(torch.zeros(lowerCAmelCase_), torch.zeros(lowerCAmelCase_)) for _ in range(lowerCAmelCase_)
]
return common_inputs
def _UpperCAmelCase ( self : int , lowerCAmelCase_ : PreTrainedTokenizer , lowerCAmelCase_ : int = -1 , lowerCAmelCase_ : int = -1 , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : Optional[TensorType] = None , ):
"""simple docstring"""
lowercase_ = compute_effective_axis_dimension(
lowerCAmelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0)
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowercase_ = tokenizer.num_special_tokens_to_add(lowerCAmelCase_)
lowercase_ = compute_effective_axis_dimension(
lowerCAmelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCAmelCase_)
# Generate dummy inputs according to compute batch and sequence
lowercase_ = [""" """.join([tokenizer.unk_token]) * seq_length] * batch_size
lowercase_ = dict(tokenizer(lowerCAmelCase_ , return_tensors=lowerCAmelCase_))
return common_inputs
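# --- Hedged stand-in for compute_effective_axis_dimension as used above: a
# dynamic axis (-1) is replaced by a small fixed size so ONNX export cannot
# constant-fold it away, and space is reserved for special tokens. This mirrors
# the call sites in this file; the exact upstream implementation may differ.
def effective_axis_dimension(dimension: int, fixed_dimension: int, num_token_to_add: int = 0) -> int:
    if dimension <= 0:  # <= 0 marks a dynamic axis
        dimension = fixed_dimension
    return dimension - num_token_to_add
assert effective_axis_dimension(-1, fixed_dimension=2) == 2
assert effective_axis_dimension(-1, fixed_dimension=8, num_token_to_add=2) == 6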
def _UpperCAmelCase ( self : List[Any] , lowerCAmelCase_ : PreTrainedTokenizer , lowerCAmelCase_ : int = -1 , lowerCAmelCase_ : int = -1 , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : Optional[TensorType] = None , ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
lowercase_ = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowerCAmelCase_ , batch_size=lowerCAmelCase_ , seq_length=lowerCAmelCase_ , is_pair=lowerCAmelCase_ , framework=lowerCAmelCase_)
elif self.task == "causal-lm":
lowercase_ = self._generate_dummy_inputs_for_causal_lm(
lowerCAmelCase_ , batch_size=lowerCAmelCase_ , seq_length=lowerCAmelCase_ , is_pair=lowerCAmelCase_ , framework=lowerCAmelCase_)
else:
lowercase_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase_ , batch_size=lowerCAmelCase_ , seq_length=lowerCAmelCase_ , is_pair=lowerCAmelCase_ , framework=lowerCAmelCase_)
return common_inputs
def _UpperCAmelCase ( self : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Dict):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
lowercase_ = super()._flatten_past_key_values_(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
else:
lowercase_ = super(lowerCAmelCase_ , self)._flatten_past_key_values_(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
| 313
|
"""simple docstring"""
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    '''Decompose a square matrix into a lower and an upper triangular matrix'''
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)
    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
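# --- Hedged usage sketch: decompose a small matrix and check that
# lower @ upper reconstructs it. The matrix below is illustrative.
import numpy as np
table = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
lower, upper = lower_upper_decomposition(table)
assert np.allclose(lower @ upper, table)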
| 313
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase : Union[str, Any] = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Optional[Any] = [
"IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"IBertForMaskedLM",
"IBertForMultipleChoice",
"IBertForQuestionAnswering",
"IBertForSequenceClassification",
"IBertForTokenClassification",
"IBertModel",
"IBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
UpperCAmelCase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 313
|
"""simple docstring"""
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")
def md_prefix(i: int) -> str:
    return f"{i * '  '}*" if i else "\n##"
def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path
def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")
if __name__ == "__main__":
print_directory_md(".")
| 313
| 1
|
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
UpperCAmelCase : Optional[Any] = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
print("Googling.....")
UpperCAmelCase : Union[str, Any] = F"https://www.google.com/search?q={query}&num=100"
UpperCAmelCase : Tuple = requests.get(
url,
headers={"User-Agent": str(UserAgent().random)},
)
try:
UpperCAmelCase : List[Any] = (
BeautifulSoup(res.text, "html.parser")
.find("div", attrs={"class": "yuRUbf"})
.find("a")
.get("href")
)
except AttributeError:
UpperCAmelCase : Any = parse_qs(
BeautifulSoup(res.text, "html.parser")
.find("div", attrs={"class": "kCrYT"})
.find("a")
.get("href")
)["url"][0]
webbrowser.open(link)
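# --- Standalone sketch of the parse_qs fallback above: some Google result
# links are relative redirects whose query string carries the real target URL.
# The href and parameter name below are illustrative; the parameter Google
# actually uses varies between result layouts.
from urllib.parse import parse_qs, urlparse
href = "/url?q=https://example.com/page&sa=U"
target = parse_qs(urlparse(href).query)["q"][0]
assert target == "https://example.com/page"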
| 313
|
"""simple docstring"""
def bead_sort(sequence: list) -> list:
    '''Sort a list of non-negative integers with the gravity/bead method'''
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                # let the excess beads "fall" from the upper rod onto the lower one
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 313
| 1
|
"""simple docstring"""
from math import pow, sqrt
def validate(*values: float) -> bool:
    '''Return True if there is at least one value and all values are positive'''
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result
def effusion_ratio(molar_mass_a: float, molar_mass_b: float) -> float | ValueError:
    '''Graham's law: rate_a / rate_b = sqrt(molar_mass_b / molar_mass_a)'''
    return (
        round(sqrt(molar_mass_b / molar_mass_a), 6)
        if validate(molar_mass_a, molar_mass_b)
        else ValueError("Input Error: Molar mass values must be greater than 0.")
    )
def first_effusion_rate(
    effusion_rate: float, molar_mass_a: float, molar_mass_b: float
) -> float | ValueError:
    return (
        round(effusion_rate * sqrt(molar_mass_b / molar_mass_a), 6)
        if validate(effusion_rate, molar_mass_a, molar_mass_b)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0."
        )
    )
def second_effusion_rate(
    effusion_rate: float, molar_mass_a: float, molar_mass_b: float
) -> float | ValueError:
    return (
        round(effusion_rate / sqrt(molar_mass_b / molar_mass_a), 6)
        if validate(effusion_rate, molar_mass_a, molar_mass_b)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0."
        )
    )
def first_molar_mass(
    molar_mass: float, effusion_rate_a: float, effusion_rate_b: float
) -> float | ValueError:
    return (
        round(molar_mass / pow(effusion_rate_a / effusion_rate_b, 2), 6)
        if validate(molar_mass, effusion_rate_a, effusion_rate_b)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0."
        )
    )
def second_molar_mass(
    molar_mass: float, effusion_rate_a: float, effusion_rate_b: float
) -> float | ValueError:
    return (
        round(pow(effusion_rate_a / effusion_rate_b, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_a, effusion_rate_b)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0."
        )
    )
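# --- Hedged usage sketch of the Graham's-law helpers above: hydrogen
# (~2.016 g/mol) effuses roughly four times faster than oxygen (~31.999 g/mol).
print(validate(2.016, 31.999))        # True
print(effusion_ratio(2.016, 31.999))  # ~3.984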
| 313
|
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCAmelCase : Tuple = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    '''Build default masks for any that were not supplied'''
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
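# --- Standalone sketch of shift_tokens_right as used in this file: decoder
# inputs are the labels shifted one step right, with decoder_start_token_id in
# front and any -100 label placeholders replaced by the pad token. A minimal
# NumPy version under those assumptions (the real one is the imported Flax helper).
import numpy as np
def shift_right(input_ids, pad_token_id, decoder_start_token_id):
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    return np.where(shifted == -100, pad_token_id, shifted)
print(shift_right(np.array([[71, 82, 18, 2]]), pad_token_id=1, decoder_start_token_id=2))
# [[ 2 71 82 18]]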
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Optional[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int=1_3 , lowerCAmelCase_ : Optional[Any]=7 , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : Dict=9_9 , lowerCAmelCase_ : List[Any]=1_6 , lowerCAmelCase_ : Union[str, Any]=2 , lowerCAmelCase_ : str=4 , lowerCAmelCase_ : Dict=4 , lowerCAmelCase_ : List[str]="gelu" , lowerCAmelCase_ : Tuple=0.1 , lowerCAmelCase_ : Tuple=0.1 , lowerCAmelCase_ : str=3_2 , lowerCAmelCase_ : Dict=2 , lowerCAmelCase_ : Any=1 , lowerCAmelCase_ : str=0 , lowerCAmelCase_ : List[Any]=0.02 , ):
"""simple docstring"""
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = seq_length
lowercase_ = is_training
lowercase_ = use_labels
lowercase_ = vocab_size
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_act
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = max_position_embeddings
lowercase_ = eos_token_id
lowercase_ = pad_token_id
lowercase_ = bos_token_id
lowercase_ = initializer_range
def _UpperCAmelCase ( self : str):
"""simple docstring"""
lowercase_ = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size) , 3 , self.vocab_size)
lowercase_ = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa)) , -1)
lowercase_ = shift_tokens_right(lowerCAmelCase_ , 1 , 2)
lowercase_ = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=lowerCAmelCase_ , )
lowercase_ = prepare_blenderbot_inputs_dict(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
return config, inputs_dict
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ , lowercase_ = self.prepare_config_and_inputs()
return config, inputs_dict
def _UpperCAmelCase ( self : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : str):
"""simple docstring"""
lowercase_ = 2_0
lowercase_ = model_class_name(lowerCAmelCase_)
lowercase_ = model.encode(inputs_dict["""input_ids"""])
lowercase_ , lowercase_ = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
lowercase_ = model.init_cache(decoder_input_ids.shape[0] , lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""")
lowercase_ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowercase_ = model.decode(
decoder_input_ids[:, :-1] , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ , decoder_position_ids=lowerCAmelCase_ , )
lowercase_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""")
lowercase_ = model.decode(
decoder_input_ids[:, -1:] , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowerCAmelCase_ , )
lowercase_ = model.decode(lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''')
def _UpperCAmelCase ( self : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[Any]):
"""simple docstring"""
lowercase_ = 2_0
lowercase_ = model_class_name(lowerCAmelCase_)
lowercase_ = model.encode(inputs_dict["""input_ids"""])
lowercase_ , lowercase_ = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
lowercase_ = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
] , axis=-1 , )
lowercase_ = model.init_cache(decoder_input_ids.shape[0] , lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowercase_ = model.decode(
decoder_input_ids[:, :-1] , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ , decoder_position_ids=lowerCAmelCase_ , )
lowercase_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""")
lowercase_ = model.decode(
decoder_input_ids[:, -1:] , lowerCAmelCase_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowerCAmelCase_ , decoder_position_ids=lowerCAmelCase_ , )
lowercase_ = model.decode(lowerCAmelCase_ , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_)
lowercase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''')
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
lowercase__ = 99
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
lowercase_ = np.array(
[
[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
[6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
[5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
[7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
[8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
[5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding
[6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
[5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
[4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
[2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
[2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
[4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
[7_0, 7_0, 5_0, 9, 2_8, 0, 2],
] , dtype=np.intaa , )
lowercase_ = input_ids.shape[0]
lowercase_ = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ , lowercase_ , lowercase_ = self._get_config_and_data()
lowercase_ = FlaxBlenderbotForConditionalGeneration(lowerCAmelCase_)
lowercase_ = lm_model(input_ids=lowerCAmelCase_)
lowercase_ = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["""logits"""].shape , lowerCAmelCase_)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , )
lowercase_ = FlaxBlenderbotForConditionalGeneration(lowerCAmelCase_)
lowercase_ = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa)
lowercase_ = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa)
lowercase_ = lm_model(input_ids=lowerCAmelCase_ , decoder_input_ids=lowerCAmelCase_)
lowercase_ = (*summary.shape, config.vocab_size)
self.assertEqual(outputs["""logits"""].shape , lowerCAmelCase_)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa)
lowercase_ = shift_tokens_right(lowerCAmelCase_ , 1 , 2)
lowercase_ = np.equal(lowerCAmelCase_ , 1).astype(np.floataa).sum()
lowercase_ = np.equal(lowerCAmelCase_ , 1).astype(np.floataa).sum()
self.assertEqual(shifted.shape , input_ids.shape)
self.assertEqual(lowerCAmelCase_ , n_pad_before - 1)
self.assertTrue(np.equal(shifted[:, 0] , 2).all())
@require_flax
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , unittest.TestCase , __UpperCAmelCase ):
lowercase__ = True
lowercase__ = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
lowercase__ = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = FlaxBlenderbotModelTester(self)
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
def _UpperCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
lowercase_ = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = model_class(lowerCAmelCase_)
@jax.jit
def encode_jitted(lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any]=None , **lowerCAmelCase_ : str):
return model.encode(input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_)
with self.subTest("""JIT Enabled"""):
lowercase_ = encode_jitted(**lowerCAmelCase_).to_tuple()
with self.subTest("""JIT Disabled"""):
with jax.disable_jit():
lowercase_ = encode_jitted(**lowerCAmelCase_).to_tuple()
self.assertEqual(len(lowerCAmelCase_) , len(lowerCAmelCase_))
for jitted_output, output in zip(lowerCAmelCase_ , lowerCAmelCase_):
self.assertEqual(jitted_output.shape , output.shape)
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
lowercase_ = model_class(lowerCAmelCase_)
lowercase_ = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""])
lowercase_ = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[Any]):
return model.decode(
decoder_input_ids=lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , encoder_outputs=lowerCAmelCase_ , )
with self.subTest("""JIT Enabled"""):
lowercase_ = decode_jitted(**lowerCAmelCase_).to_tuple()
with self.subTest("""JIT Disabled"""):
with jax.disable_jit():
lowercase_ = decode_jitted(**lowerCAmelCase_).to_tuple()
self.assertEqual(len(lowerCAmelCase_) , len(lowerCAmelCase_))
for jitted_output, output in zip(lowerCAmelCase_ , lowerCAmelCase_):
self.assertEqual(jitted_output.shape , output.shape)
@slow
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
for model_class_name in self.all_model_classes:
lowercase_ = model_class_name.from_pretrained("""facebook/blenderbot-400M-distill""")
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
lowercase_ = np.ones((1, 1)) * model.config.eos_token_id
lowercase_ = model(lowerCAmelCase_)
self.assertIsNotNone(lowerCAmelCase_)
@unittest.skipUnless(jax_device != """cpu""" , """3B test too slow on CPU.""")
@slow
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = {"""num_beams""": 1, """early_stopping""": True, """min_length""": 1_5, """max_length""": 2_5}
lowercase_ = {"""skip_special_tokens""": True, """clean_up_tokenization_spaces""": True}
lowercase_ = FlaxBlenderbotForConditionalGeneration.from_pretrained("""facebook/blenderbot-3B""" , from_pt=lowerCAmelCase_)
lowercase_ = BlenderbotTokenizer.from_pretrained("""facebook/blenderbot-3B""")
lowercase_ = ["""Sam"""]
lowercase_ = tokenizer(lowerCAmelCase_ , return_tensors="""jax""")
lowercase_ = model.generate(**lowerCAmelCase_ , **lowerCAmelCase_)
lowercase_ = """Sam is a great name. It means \"sun\" in Gaelic."""
lowercase_ = tokenizer.batch_decode(lowerCAmelCase_ , **lowerCAmelCase_)
assert generated_txt[0].strip() == tgt_text
| 313
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
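# A minimal, hedged sketch of the deferred-import idea behind `_LazyModule`
# (not the actual implementation): attribute access looks the name up in the
# import structure, imports the owning submodule once, and caches the result.
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr: str):
        for submodule, exported_names in self._import_structure.items():
            if attr in exported_names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                value = getattr(module, attr)
                setattr(self, attr, value)  # cache so __getattr__ only runs once per name
                return value
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")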
| 313
|
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    '''simple docstring'''
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    '''simple docstring'''
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
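def _demo_min_hash_estimate() -> float:
    # Hedged illustration of the helpers above: two token lists with the same
    # underlying token set yield identical MinHash sketches, so the estimated
    # Jaccard similarity is exactly 1.0. (The lists are repeated to clear the
    # MIN_NUM_TOKENS threshold.)
    left = get_min_hash(10 * ["left", "right", "def", "add", "return"])
    right = get_min_hash(10 * ["return", "add", "def", "right", "left"])
    assert left is not None and right is not None
    return left.jaccard(right)  # -> 1.0 for identical token sets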
class DuplicationIndex:
    def __init__(
        self,
        *,
        duplication_jaccard_threshold: float = 0.85,
    ):
        """simple docstring"""
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """simple docstring"""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        """simple docstring"""
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        """simple docstring"""
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    '''simple docstring'''
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator):
    '''simple docstring'''
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator, jaccard_threshold: float):
    '''simple docstring'''
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    '''simple docstring'''
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
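# Worked example for the exact Jaccard above: the token sets of
# "def f(a): return a" and "def f(b): return b" share {"def", "f", "return"}
# (3 tokens) out of a 5-token union, giving a similarity of 3 / 5 = 0.6.
assert jaccard_similarity("def f(a): return a", "def f(b): return b") == 3 / 5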
_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    '''simple docstring'''
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    '''simple docstring'''
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(dataset, jaccard_threshold: float = 0.85) -> Tuple[Type[Dataset], List[List[Dict]]]:
    '''simple docstring'''
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]
    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")
    return ds_filter, duplicate_clusters
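if __name__ == "__main__":
    # Hedged usage sketch on a toy dataset: rows need `content`, `repo_name`,
    # and `path` columns. (These toy snippets are shorter than MIN_NUM_TOKENS,
    # so in practice they produce no MinHashes and hence no clusters.)
    toy = Dataset.from_dict(
        {
            "content": ["def f(): pass", "def f(): pass", "print('hi')"],
            "repo_name": ["repo_a", "repo_b", "repo_c"],
            "path": ["a.py", "b.py", "c.py"],
        }
    )
    filtered, clusters = deduplicate_dataset(toy, jaccard_threshold=0.85)
    print(len(filtered), len(clusters))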
| 313
| 1
|
"""simple docstring"""
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , unittest.TestCase ):
lowercase__ = BertJapaneseTokenizer
lowercase__ = False
lowercase__ = True
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
super().setUp()
lowercase_ = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""こんにちは""",
"""こん""",
"""にちは""",
"""ばんは""",
"""##こん""",
"""##にちは""",
"""##ばんは""",
"""世界""",
"""##世界""",
"""、""",
"""##、""",
"""。""",
"""##。""",
]
lowercase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens]))
def _UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : int):
"""simple docstring"""
lowercase_ = """こんにちは、世界。 \nこんばんは、世界。"""
lowercase_ = """こんにちは 、 世界 。 こんばんは 、 世界 。"""
return input_text, output_text
def _UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : List[Any]):
"""simple docstring"""
lowercase_ , lowercase_ = self.get_input_output_texts(lowerCAmelCase_)
lowercase_ = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_)
lowercase_ = tokenizer.decode(lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_)
return text, ids
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
pass # TODO add if relevant
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
pass # TODO add if relevant
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
pass # TODO add if relevant
def _UpperCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
lowercase_ = self.tokenizer_class(self.vocab_file)
lowercase_ = tokenizer.tokenize("""こんにちは、世界。\nこんばんは、世界。""")
self.assertListEqual(lowerCAmelCase_ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""])
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4])
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""mecab""")
self.assertIsNotNone(lowerCAmelCase_)
lowercase_ = """こんにちは、世界。\nこんばんは、世界。"""
lowercase_ = tokenizer.tokenize(lowerCAmelCase_)
self.assertListEqual(lowerCAmelCase_ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""])
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4])
lowercase_ = os.path.join(self.tmpdirname , """tokenizer.bin""")
with open(lowerCAmelCase_ , """wb""") as handle:
pickle.dump(lowerCAmelCase_ , lowerCAmelCase_)
with open(lowerCAmelCase_ , """rb""") as handle:
lowercase_ = pickle.load(lowerCAmelCase_)
lowercase_ = tokenizer_new.tokenize(lowerCAmelCase_)
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_)
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
lowercase_ = MecabTokenizer(mecab_dic="""ipadic""")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def _UpperCAmelCase ( self : int):
"""simple docstring"""
try:
lowercase_ = MecabTokenizer(mecab_dic="""unidic_lite""")
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
try:
lowercase_ = MecabTokenizer(mecab_dic="""unidic""")
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
lowercase_ = MecabTokenizer(do_lower_case=lowerCAmelCase_ , mecab_dic="""ipadic""")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップルストア""", """で""", """iphone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
try:
lowercase_ = MecabTokenizer(
do_lower_case=lowerCAmelCase_ , normalize_text=lowerCAmelCase_ , mecab_option="""-d /usr/local/lib/mecab/dic/jumandic""")
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
lowercase_ = MecabTokenizer(normalize_text=lowerCAmelCase_ , mecab_dic="""ipadic""")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """ """, """。"""] , )
@require_sudachi
def _UpperCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
lowercase_ = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""sudachi""")
self.assertIsNotNone(lowerCAmelCase_)
lowercase_ = """こんにちは、世界。\nこんばんは、世界。"""
lowercase_ = tokenizer.tokenize(lowerCAmelCase_)
self.assertListEqual(lowerCAmelCase_ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""])
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4])
lowercase_ = os.path.join(self.tmpdirname , """tokenizer.bin""")
with open(lowerCAmelCase_ , """wb""") as handle:
pickle.dump(lowerCAmelCase_ , lowerCAmelCase_)
with open(lowerCAmelCase_ , """rb""") as handle:
lowercase_ = pickle.load(lowerCAmelCase_)
lowercase_ = tokenizer_new.tokenize(lowerCAmelCase_)
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_)
@require_sudachi
def _UpperCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
lowercase_ = SudachiTokenizer(sudachi_dict_type="""core""")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , )
@require_sudachi
def _UpperCAmelCase ( self : str):
"""simple docstring"""
lowercase_ = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""A""")
self.assertListEqual(tokenizer.tokenize("""外国人参政権""") , ["""外国""", """人""", """参政""", """権"""])
@require_sudachi
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
lowercase_ = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""B""")
self.assertListEqual(tokenizer.tokenize("""外国人参政権""") , ["""外国人""", """参政権"""])
@require_sudachi
def _UpperCAmelCase ( self : str):
"""simple docstring"""
lowercase_ = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""C""")
self.assertListEqual(tokenizer.tokenize("""外国人参政権""") , ["""外国人参政権"""])
@require_sudachi
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
lowercase_ = SudachiTokenizer(do_lower_case=lowerCAmelCase_ , sudachi_dict_type="""core""")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iphone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , )
@require_sudachi
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
lowercase_ = SudachiTokenizer(normalize_text=lowerCAmelCase_ , sudachi_dict_type="""core""")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """\u3000""", """。""", """ """, """ """] , )
@require_sudachi
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
lowercase_ = SudachiTokenizer(trim_whitespace=lowerCAmelCase_ , sudachi_dict_type="""core""")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
@require_jumanpp
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""jumanpp""")
self.assertIsNotNone(lowerCAmelCase_)
lowercase_ = """こんにちは、世界。\nこんばんは、世界。"""
lowercase_ = tokenizer.tokenize(lowerCAmelCase_)
self.assertListEqual(lowerCAmelCase_ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""])
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4])
lowercase_ = os.path.join(self.tmpdirname , """tokenizer.bin""")
with open(lowerCAmelCase_ , """wb""") as handle:
pickle.dump(lowerCAmelCase_ , lowerCAmelCase_)
with open(lowerCAmelCase_ , """rb""") as handle:
lowercase_ = pickle.load(lowerCAmelCase_)
lowercase_ = tokenizer_new.tokenize(lowerCAmelCase_)
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_)
@require_jumanpp
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ = JumanppTokenizer(do_lower_case=lowerCAmelCase_)
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップル""", """ストア""", """で""", """iphone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ = JumanppTokenizer(normalize_text=lowerCAmelCase_)
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""ア""", """ッ""", """フ""", """゚""", """ル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ = JumanppTokenizer(trim_whitespace=lowerCAmelCase_)
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """。"""] , )
@require_jumanpp
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize("""ありがとうございますm(_ _)m見つけるのが大変です。""") , ["""ありがとう""", """ございます""", """m(_ _)m""", """見つける""", """の""", """が""", """大変です""", """。"""] , )
def _UpperCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
lowercase_ = ["""[UNK]""", """[CLS]""", """[SEP]""", """こんにちは""", """こん""", """にちは""", """ばんは""", """##こん""", """##にちは""", """##ばんは"""]
lowercase_ = {}
for i, token in enumerate(lowerCAmelCase_):
lowercase_ = i
lowercase_ = WordpieceTokenizer(vocab=lowerCAmelCase_ , unk_token="""[UNK]""")
self.assertListEqual(tokenizer.tokenize("""""") , [])
self.assertListEqual(tokenizer.tokenize("""こんにちは""") , ["""こんにちは"""])
self.assertListEqual(tokenizer.tokenize("""こんばんは""") , ["""こん""", """##ばんは"""])
self.assertListEqual(tokenizer.tokenize("""こんばんは こんばんにちは こんにちは""") , ["""こん""", """##ばんは""", """[UNK]""", """こんにちは"""])
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ = BertJapaneseTokenizer.from_pretrained("""nlp-waseda/roberta-base-japanese-with-auto-jumanpp""")
lowercase_ = tokenizer.subword_tokenizer
lowercase_ = subword_tokenizer.tokenize("""国境 の 長い トンネル を 抜ける と 雪国 であった 。""")
self.assertListEqual(lowerCAmelCase_ , ["""▁国境""", """▁の""", """▁長い""", """▁トンネル""", """▁を""", """▁抜ける""", """▁と""", """▁雪""", """国""", """▁であった""", """▁。"""])
lowercase_ = subword_tokenizer.tokenize("""こんばんは こんばん にち は こんにちは""")
self.assertListEqual(lowerCAmelCase_ , ["""▁こん""", """ばん""", """は""", """▁こん""", """ばん""", """▁に""", """ち""", """▁は""", """▁こんにちは"""])
def _UpperCAmelCase ( self : str):
"""simple docstring"""
lowercase_ = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese""")
lowercase_ = tokenizer.encode("""ありがとう。""" , add_special_tokens=lowerCAmelCase_)
lowercase_ = tokenizer.encode("""どういたしまして。""" , add_special_tokens=lowerCAmelCase_)
lowercase_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_)
lowercase_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_)
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , unittest.TestCase ):
lowercase__ = BertJapaneseTokenizer
lowercase__ = False
def _UpperCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
super().setUp()
lowercase_ = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""]
lowercase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens]))
def _UpperCAmelCase ( self : Tuple , **lowerCAmelCase_ : Optional[int]):
"""simple docstring"""
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type="""character""" , **lowerCAmelCase_)
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : Union[str, Any]):
"""simple docstring"""
lowercase_ = """こんにちは、世界。 \nこんばんは、世界。"""
lowercase_ = """こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"""
return input_text, output_text
def _UpperCAmelCase ( self : str):
"""simple docstring"""
pass # TODO add if relevant
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
pass # TODO add if relevant
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
pass # TODO add if relevant
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="""character""")
lowercase_ = tokenizer.tokenize("""こんにちは、世界。 \nこんばんは、世界。""")
self.assertListEqual(
lowerCAmelCase_ , ["""こ""", """ん""", """に""", """ち""", """は""", """、""", """世""", """界""", """。""", """こ""", """ん""", """ば""", """ん""", """は""", """、""", """世""", """界""", """。"""])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase_) , [3, 4, 5, 6, 7, 1_1, 9, 1_0, 1_2, 3, 4, 8, 4, 7, 1_1, 9, 1_0, 1_2])
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""]
lowercase_ = {}
for i, token in enumerate(lowerCAmelCase_):
lowercase_ = i
lowercase_ = CharacterTokenizer(vocab=lowerCAmelCase_ , unk_token="""[UNK]""")
self.assertListEqual(tokenizer.tokenize("""""") , [])
self.assertListEqual(tokenizer.tokenize("""こんにちは""") , ["""こ""", """ん""", """に""", """ち""", """は"""])
self.assertListEqual(tokenizer.tokenize("""こんにちほ""") , ["""こ""", """ん""", """に""", """ち""", """[UNK]"""])
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese-char""")
lowercase_ = tokenizer.encode("""ありがとう。""" , add_special_tokens=lowerCAmelCase_)
lowercase_ = tokenizer.encode("""どういたしまして。""" , add_special_tokens=lowerCAmelCase_)
lowercase_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_)
lowercase_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_)
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ = """cl-tohoku/bert-base-japanese"""
lowercase_ = AutoTokenizer.from_pretrained(lowerCAmelCase_)
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_)
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ = """cl-tohoku/bert-base-japanese"""
with self.assertLogs("""transformers""" , level="""WARNING""") as cm:
BertTokenizer.from_pretrained(lowerCAmelCase_)
self.assertTrue(
cm.records[0].message.startswith(
"""The tokenizer class you load from this checkpoint is not the same type as the class this function"""
""" is called from."""))
lowercase_ = """bert-base-cased"""
with self.assertLogs("""transformers""" , level="""WARNING""") as cm:
BertJapaneseTokenizer.from_pretrained(lowerCAmelCase_)
self.assertTrue(
cm.records[0].message.startswith(
"""The tokenizer class you load from this checkpoint is not the same type as the class this function"""
""" is called from."""))
| 313
|
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def rename_state_dict_key(k: str) -> str:
    '''simple docstring'''
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
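# Hedged worked example tracing a hypothetical TF key through PATTERNS above:
#   "encoder/layer_0/attention/output_proj/kernel"
#     "attention"   -> "attn"       encoder/layer_0/attn/output_proj/kernel
#     "/"           -> "."          encoder.layer_0.attn.output_proj.kernel
#     "r.layer_"    -> "r.layers."  encoder.layers.0.attn.output_proj.kernel
#     "output_proj" -> "out_proj"   encoder.layers.0.attn.out_proj.kernel
#     "kernel"      -> "weight"     encoder.layers.0.attn.out_proj.weight
assert rename_state_dict_key("encoder/layer_0/attention/output_proj/kernel") == (
    "encoder.layers.0.attn.out_proj.weight"
)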
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    '''simple docstring'''
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    '''simple docstring'''
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str) -> None:
    '''simple docstring'''
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 313
| 1
|
"""simple docstring"""
def fibonacci(n: int) -> int:
    '''simple docstring'''
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    '''simple docstring'''
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index
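# Hedged sanity check: 144 = F(12) is the first Fibonacci number with three
# digits, so the index for n = 3 should be 12. (Project Euler 25 asks for
# n = 1000 digits; its published answer is index 4782.)
assert fibonacci_digits_index(3) == 12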
def solution(n: int = 1000) -> int:
    '''simple docstring'''
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 313
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    '''simple docstring'''
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1
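# Hedged illustration: the incremental sieve yields the primes in order.
import itertools

assert list(itertools.islice(sieve(), 6)) == [2, 3, 5, 7, 11, 13]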
def solution(limit: float = 1e10) -> int:
    '''simple docstring'''
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2
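# Hedged note on why only odd n matter (binomial theorem): modulo p**2,
# (p - 1)**n + (p + 1)**n reduces to 2 when n is even and to 2*n*p when n is
# odd, so the remainder of interest grows like 2*n*p. Spot check, p=7, n=3:
assert ((7 - 1) ** 3 + (7 + 1) ** 3) % 7**2 == 2 * 3 * 7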
if __name__ == "__main__":
print(solution())
| 313
| 1
|
"""simple docstring"""
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class SCREAMING_SNAKE_CASE__ :
def __init__( self : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Any=1_3 , lowerCAmelCase_ : Any=3_0 , lowerCAmelCase_ : Optional[int]=2 , lowerCAmelCase_ : List[str]=3 , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : Union[str, Any]=3_2 , lowerCAmelCase_ : int=5 , lowerCAmelCase_ : Optional[int]=4 , lowerCAmelCase_ : str=3_7 , lowerCAmelCase_ : Optional[Any]="gelu" , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : int=0.1 , lowerCAmelCase_ : Optional[int]=1_0 , lowerCAmelCase_ : Union[str, Any]=0.02 , lowerCAmelCase_ : Any=3 , lowerCAmelCase_ : str=None , lowerCAmelCase_ : Optional[int]=2 , ):
"""simple docstring"""
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = image_size
lowercase_ = patch_size
lowercase_ = num_channels
lowercase_ = is_training
lowercase_ = use_labels
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_act
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = type_sequence_label_size
lowercase_ = initializer_range
lowercase_ = scope
lowercase_ = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
lowercase_ = (image_size // patch_size) ** 2
lowercase_ = num_patches + 2
def _UpperCAmelCase ( self : str):
"""simple docstring"""
lowercase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowercase_ = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _UpperCAmelCase ( self : int , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : str):
"""simple docstring"""
lowercase_ = DeiTModel(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = model(lowerCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Tuple):
"""simple docstring"""
lowercase_ = DeiTForMaskedImageModeling(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = model(lowerCAmelCase_)
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
lowercase_ = 1
lowercase_ = DeiTForMaskedImageModeling(lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
lowercase_ = model(lowerCAmelCase_)
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size))
def _UpperCAmelCase ( self : Optional[int] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int):
"""simple docstring"""
lowercase_ = self.type_sequence_label_size
lowercase_ = DeiTForImageClassification(lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = model(lowerCAmelCase_ , labels=lowerCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
lowercase_ = 1
lowercase_ = DeiTForImageClassification(lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
lowercase_ = model(lowerCAmelCase_ , labels=lowerCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
lowercase__ = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
lowercase__ = (
{
"feature-extraction": DeiTModel,
"image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ = DeiTModelTester(self)
lowercase_ = ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ , hidden_size=3_7)
def _UpperCAmelCase ( self : int):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""")
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
pass
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = model_class(lowerCAmelCase_)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
lowercase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase_ , nn.Linear))
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = model_class(lowerCAmelCase_)
lowercase_ = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ = [*signature.parameters.keys()]
lowercase_ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase_)
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase_)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_)
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Union[str, Any]=False):
"""simple docstring"""
lowercase_ = super()._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_)
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
if not self.model_tester.is_training:
return
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(lowerCAmelCase_)
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
lowercase_ = model_class(lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.train()
lowercase_ = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_)
lowercase_ = model(**lowerCAmelCase_).loss
loss.backward()
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
lowercase_ = False
lowercase_ = True
for model_class in self.all_model_classes:
if model_class in get_values(lowerCAmelCase_) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
lowercase_ = model_class(lowerCAmelCase_)
model.gradient_checkpointing_enable()
model.to(lowerCAmelCase_)
model.train()
lowercase_ = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_)
lowercase_ = model(**lowerCAmelCase_).loss
loss.backward()
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = [
{"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
{"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
{"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(lowerCAmelCase_),
*get_values(lowerCAmelCase_),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'''Testing {model_class} with {problem_type["title"]}'''):
lowercase_ = problem_type["""title"""]
lowercase_ = problem_type["""num_labels"""]
lowercase_ = model_class(lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.train()
lowercase_ = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_)
if problem_type["num_labels"] > 1:
lowercase_ = inputs["""labels"""].unsqueeze(1).repeat(1 , problem_type["""num_labels"""])
lowercase_ = inputs["""labels"""].to(problem_type["""dtype"""])
# This tests that we do not trigger the warning from PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom that something is wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=lowerCAmelCase_) as warning_list:
lowercase_ = model(**lowerCAmelCase_).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message):
raise ValueError(
F'''Something is going wrong in the regression problem: intercepted {w.message}''')
loss.backward()
@slow
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ = DeiTModel.from_pretrained(lowerCAmelCase_)
self.assertIsNotNone(lowerCAmelCase_)
def _SCREAMING_SNAKE_CASE () -> int:
'''simple docstring'''
lowercase_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
def _UpperCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""")
if is_vision_available()
else None
)
@slow
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = DeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""").to(
lowerCAmelCase_)
lowercase_ = self.default_image_processor
lowercase_ = prepare_img()
lowercase_ = image_processor(images=lowerCAmelCase_ , return_tensors="""pt""").to(lowerCAmelCase_)
# forward pass
with torch.no_grad():
lowercase_ = model(**lowerCAmelCase_)
# verify the logits
lowercase_ = torch.Size((1, 1_0_0_0))
self.assertEqual(outputs.logits.shape , lowerCAmelCase_)
lowercase_ = torch.tensor([-1.0_266, 0.1_912, -1.2_861]).to(lowerCAmelCase_)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1E-4))
@slow
@require_accelerate
@require_torch_gpu
def _UpperCAmelCase ( self : str):
"""simple docstring"""
lowercase_ = DeiTModel.from_pretrained(
"""facebook/deit-base-distilled-patch16-224""" , torch_dtype=torch.floataa , device_map="""auto""")
lowercase_ = self.default_image_processor
lowercase_ = prepare_img()
lowercase_ = image_processor(images=lowerCAmelCase_ , return_tensors="""pt""")
lowercase_ = inputs.pixel_values.to(lowerCAmelCase_)
# forward pass to make sure inference works in fp16
with torch.no_grad():
lowercase_ = model(lowerCAmelCase_)
| 313
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    def __init__(self, initial_capacity: int = 6) -> None:
        """simple docstring"""
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        """simple docstring"""
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        """simple docstring"""
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        """simple docstring"""
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        """simple docstring"""
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        """simple docstring"""
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        """simple docstring"""
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self) -> None:
        """simple docstring"""
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")


class Node:
    def __init__(self) -> None:
        """simple docstring"""
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
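def _demo() -> None:
    # Hedged usage sketch of the fixed-capacity queue above: with capacity 2,
    # two enqueues fill the ring and dequeues return items in FIFO order.
    queue = CircularQueueLinkedList(2)
    queue.enqueue("a")
    queue.enqueue("b")
    assert queue.dequeue() == "a"
    assert queue.dequeue() == "b"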
if __name__ == "__main__":
import doctest
doctest.testmod()
| 313
| 1
|
"""simple docstring"""
import argparse
import os
import re
PATH_TO_TRANSFORMERS = "src/transformers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def get_indent(line: str) -> str:
    '''simple docstring'''
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    '''simple docstring'''
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []
    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1
    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))
    return blocks
def ignore_underscore(key):
    '''simple docstring'''

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner
def sort_objects(objects, key=None):
    '''simple docstring'''

    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
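# Quick illustration of the three buckets above: constants, then classes,
# then functions, each alphabetized while ignoring underscores.
assert sort_objects(["helper_fn", "MyClass", "SOME_CONSTANT"]) == [
    "SOME_CONSTANT",
    "MyClass",
    "helper_fn",
]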
def sort_objects_in_import(import_statement):
    '''Sorts the names inside a single `_import_structure` statement.'''
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
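# Added illustrative sketch (not part of the original script; assumes the `_re_*`
# regex constants defined near the top of this file): a one-line bracketed value
# list gets its entries re-sorted in place.
def _demo_sort_objects_in_import():
    before = '_import_structure["models.albert"] = ["B", "A"]'
    assert sort_objects_in_import(before) == '_import_structure["models.albert"] = ["A", "B"]'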
def sort_imports(file, check_only=True):
    '''Sorts the `_import_structure` blocks of a given init file.'''
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    '''Runs `sort_imports` on every `__init__.py` under the source tree.'''
    failures = []
    # PATH_TO_TRANSFORMERS is the package-root constant defined at the top of this script.
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
"""simple docstring"""
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    '''Returns the maximum contiguous-subarray sum, computed with Kadane's algorithm.'''
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum
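# Added illustrative sketch (not part of the original module): Kadane's single
# pass should agree with an O(n^2) brute force over all non-empty subarrays.
def _brute_force_max_subarray_sum(arr):
    return max(sum(arr[i : j + 1]) for i in range(len(arr)) for j in range(i, len(arr)))


def _demo_max_subarray_sum():
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    assert max_subarray_sum(nums) == _brute_force_max_subarray_sum(nums) == 6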
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
"""simple docstring"""
def multiplicative_persistence(num) -> int:
    '''Counts how many times the digits of `num` must be multiplied together until one digit remains.'''
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)
        steps += 1
    return steps
def additive_persistence(num) -> int:
    '''Counts how many times the digits of `num` must be summed until one digit remains.'''
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)
        steps += 1
    return steps
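# Added illustrative sketch (not part of the original module):
# 39 -> 3*9=27 -> 2*7=14 -> 1*4=4 takes three multiplicative steps, and
# 199 -> 1+9+9=19 -> 1+9=10 -> 1+0=1 takes three additive steps.
def _demo_persistence():
    assert multiplicative_persistence(39) == 3
    assert additive_persistence(199) == 3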
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
logger = logging.getLogger(__name__)
class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])

        hidden_states = layer_outputs[0]

        return hidden_states
@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)

        self.encoder = BertEncoderWithPabee(config)

        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0

        self.regression_threshold = 0
    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output

        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)
                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res
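# Added illustrative sketch (not part of the original model): the patience-based
# early exit used in the `else` branch above, reduced to plain Python over a list
# of per-layer argmax predictions. Inference stops once `patience` consecutive
# internal classifiers repeat the same prediction.
def _demo_pabee_early_exit(per_layer_predictions, patience):
    patient_counter = 0
    patient_result = None
    used_layers = 0
    for prediction in per_layer_predictions:
        used_layers += 1
        if patient_result is not None and prediction == patient_result:
            patient_counter += 1
        else:
            patient_counter = 0
        patient_result = prediction
        if patient_counter == patience:
            break
    return patient_result, used_layers


# e.g. _demo_pabee_early_exit([2, 0, 1, 1, 1, 1, 1, 1], patience=3) returns (1, 6):
# layers 4-6 repeat the prediction first made at layer 3, so layers 7-8 are skipped.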
@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )

        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        """
        Transfers the weights of `self.src` to `self.dest` by performing a forward pass using `x` as input. Under the
        hood we track all the operations in both modules.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
class FakeRegNetVisslWrapper(nn.Module):
    """
    Fake wrapper for the vissl RegNet trunk, exposing its feature blocks the way `get_trunk_forward_outputs` expects.
    """

    def __init__(self, model: nn.Module):
        super().__init__()

        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), f"Unexpected layer name {k}"
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f"res{block_index}", v))

        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        return get_trunk_forward_outputs(
            x,
            out_feat_keys=None,
            feature_blocks=self._feature_blocks,
        )
class NameToFromModelFuncMap(dict):
    """
    A dictionary with some additional logic to return a function that creates the correct original model.
    """

    def convert_name_to_timm(self, x: str) -> str:
        x_split = x.split("-")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x: str) -> Callable[[], Tuple[nn.Module, Dict]]:
        # default to timm!
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)
        return val
class NameToOurModelFuncMap(dict):
    """
    A dictionary with some additional logic to return the correct Hugging Face RegNet class reference.
    """

    def __getitem__(self, x: str) -> Callable[[], nn.Module]:
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
def manually_copy_vissl_head(from_state_dict, to_state_dict, keys: List[Tuple[str, str]]):
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f"Copied key={from_key} to={to_key}")
    return to_state_dict
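# Added illustrative sketch (not part of the original script): the head-copy
# helper above is plain key remapping between two state dicts.
def _demo_manually_copy_vissl_head():
    src = {"0.clf.0.weight": torch.ones(2, 3)}
    dst = {"classifier.1.weight": torch.zeros(2, 3)}
    out = manually_copy_vissl_head(src, dst, [("0.clf.0.weight", "classifier.1.weight")])
    assert torch.equal(out["classifier.1.weight"], torch.ones(2, 3))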
def convert_weight_and_push(
    name: str,
    from_model_func: Callable[[], nn.Module],
    our_model_func: Callable[[], nn.Module],
    config: RegNetConfig,
    save_directory: Path,
    push_to_hub: bool = True,
):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        # NOTE: `raise_if_mismatch=False` is assumed here; the SEER trunk wrappers can
        # trace a different number of ops than the HF model.
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
        to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
        our_model.load_state_dict(to_state_dict)

    our_outputs = our_model(x, output_hidden_states=True)
    our_output = (
        our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
    )

    from_output = from_model(x)
    from_output = from_output[-1] if type(from_output) is list else from_output

    # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]

    assert torch.allclose(from_output, our_output), "The model logits don't match the original one."

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        size = 224 if "seer" not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_config = {
"""regnet-x-002""": ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 1_52, 3_68] , groups_width=8 , layer_type="""x""" ),
"""regnet-x-004""": ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 1_60, 3_84] , groups_width=16 , layer_type="""x""" ),
"""regnet-x-006""": ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 2_40, 5_28] , groups_width=24 , layer_type="""x""" ),
"""regnet-x-008""": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 1_28, 2_88, 6_72] , groups_width=16 , layer_type="""x""" ),
"""regnet-x-016""": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 1_68, 4_08, 9_12] , groups_width=24 , layer_type="""x""" ),
"""regnet-x-032""": ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 1_92, 4_32, 10_08] , groups_width=48 , layer_type="""x""" ),
"""regnet-x-040""": ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 2_40, 5_60, 13_60] , groups_width=40 , layer_type="""x""" ),
"""regnet-x-064""": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[1_68, 3_92, 7_84, 16_24] , groups_width=56 , layer_type="""x""" ),
"""regnet-x-080""": ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 2_40, 7_20, 19_20] , groups_width=1_20 , layer_type="""x""" ),
"""regnet-x-120""": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[2_24, 4_48, 8_96, 22_40] , groups_width=1_12 , layer_type="""x""" ),
"""regnet-x-160""": ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[2_56, 5_12, 8_96, 20_48] , groups_width=1_28 , layer_type="""x""" ),
"""regnet-x-320""": ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[3_36, 6_72, 13_44, 25_20] , groups_width=1_68 , layer_type="""x""" ),
# y variant
"""regnet-y-002""": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 1_52, 3_68] , groups_width=8 ),
"""regnet-y-004""": ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 1_04, 2_08, 4_40] , groups_width=8 ),
"""regnet-y-006""": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 1_12, 2_56, 6_08] , groups_width=16 ),
"""regnet-y-008""": ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 1_28, 3_20, 7_68] , groups_width=16 ),
"""regnet-y-016""": ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 1_20, 3_36, 8_88] , groups_width=24 ),
"""regnet-y-032""": ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 2_16, 5_76, 15_12] , groups_width=24 ),
"""regnet-y-040""": ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[1_28, 1_92, 5_12, 10_88] , groups_width=64 ),
"""regnet-y-064""": ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[1_44, 2_88, 5_76, 12_96] , groups_width=72 ),
"""regnet-y-080""": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[1_68, 4_48, 8_96, 20_16] , groups_width=56 ),
"""regnet-y-120""": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[2_24, 4_48, 8_96, 22_40] , groups_width=1_12 ),
"""regnet-y-160""": ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[2_24, 4_48, 12_32, 30_24] , groups_width=1_12 ),
"""regnet-y-320""": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
"""regnet-y-320-seer""": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ),
"""regnet-y-640-seer""": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[3_28, 9_84, 19_68, 49_20] , groups_width=3_28 ),
"""regnet-y-1280-seer""": RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[5_28, 10_56, 29_04, 73_92] , groups_width=2_64 ),
"""regnet-y-2560-seer""": RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[6_40, 16_96, 25_44, 50_88] , groups_width=6_40 ),
"""regnet-y-10b-seer""": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[20_20, 40_40, 1_11_10, 2_82_80] , groups_width=10_10 ),
# finetuned on imagenet
"""regnet-y-320-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ),
"""regnet-y-640-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[3_28, 9_84, 19_68, 49_20] , groups_width=3_28 ),
"""regnet-y-1280-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[5_28, 10_56, 29_04, 73_92] , groups_width=2_64 ),
"""regnet-y-2560-seer-in1k""": ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[6_40, 16_96, 25_44, 50_88] , groups_width=6_40 ),
"""regnet-y-10b-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[20_20, 40_40, 1_11_10, 2_82_80] , groups_width=10_10 ),
}
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()
# add seer weights logic
    def load_using_classy_vision(checkpoint_url: str, model_func: Callable[[], nn.Module]) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu")
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
        state_dict = model_state_dict["trunk"]
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict["heads"]
# pretrained
    # (dictionary keys and the RegNetY* factory names below are reconstructed from the config table above)
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
        ),
    )

    # IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
        ),
    )
    if model_name:
        convert_weight_and_push(
            model_name,
            names_to_from_model_map[model_name],
            names_to_ours_model_map[model_name],
            names_to_config[model_name],
            save_directory,
            push_to_hub,
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name,
                names_to_from_model_map[model_name],
                names_to_ours_model_map[model_name],
                config,
                save_directory,
                push_to_hub,
            )
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported regnet* architecture,"
" currently: regnetx-*, regnety-*. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
    args = parser.parse_args()

    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
    """Checks whether `cp` is the codepoint of a CJK character."""
    # This defines a "chinese character" as anything in the CJK Unicode blocks.
    if (
        (cp >= 0x4e00 and cp <= 0x9fff)
        or (cp >= 0x3400 and cp <= 0x4dbf)  #
        or (cp >= 0x20000 and cp <= 0x2a6df)  #
        or (cp >= 0x2a700 and cp <= 0x2b73f)  #
        or (cp >= 0x2b740 and cp <= 0x2b81f)  #
        or (cp >= 0x2b820 and cp <= 0x2ceaf)  #
        or (cp >= 0xf900 and cp <= 0xfaff)
        or (cp >= 0x2f800 and cp <= 0x2fa1f)  #
    ):  #
        return True

    return False
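# Added illustrative sketch (not part of the original script): U+4E2D ("中") falls
# in the main CJK Unified Ideographs block, an ASCII letter does not.
def _demo_is_chinese_char():
    assert _is_chinese_char(ord("中"))
    assert not _is_chinese_char(ord("A"))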
def is_chinese(word: str):
    """Returns 1 if every character of `word` is a CJK character, 0 otherwise."""
    for char in word:
        char = ord(char)
        if not _is_chinese_char(char):
            return 0
    return 1
def get_chinese_word(tokens: List[str]):
    """Collects the multi-character Chinese words out of a list of LTP tokens."""
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    """Prefixes BERT subword pieces inside a known Chinese word with '##'."""
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
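# Added illustrative sketch (not part of the original script): subword pieces that
# continue a word found by LTP get the "##" whole-word-masking marker.
def _demo_add_sub_symbol():
    assert add_sub_symbol(["你", "好", "吗"], {"你好"}) == ["你", "##好", "吗"]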
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
)
parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")
    args = parser.parse_args()
main(args)
"""simple docstring"""
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , unittest.TestCase ):
lowercase__ = BigBirdTokenizer
lowercase__ = BigBirdTokenizerFast
lowercase__ = True
lowercase__ = True
    def setUp(self):
        super().setUp()

        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ = """<s>"""
lowercase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase_) , lowerCAmelCase_)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase_) , lowerCAmelCase_)
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
lowercase_ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , """<unk>""")
self.assertEqual(vocab_keys[1] , """<s>""")
self.assertEqual(vocab_keys[-1] , """[MASK]""")
self.assertEqual(len(lowerCAmelCase_) , 1_0_0_4)
def _UpperCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0)
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
lowercase_ = self.get_tokenizer()
lowercase_ = self.get_rust_tokenizer()
lowercase_ = """I was born in 92000, and this is falsé."""
lowercase_ = tokenizer.tokenize(lowerCAmelCase_)
lowercase_ = rust_tokenizer.tokenize(lowerCAmelCase_)
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_)
lowercase_ = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_)
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = self.get_rust_tokenizer()
lowercase_ = tokenizer.encode(lowerCAmelCase_)
lowercase_ = rust_tokenizer.encode(lowerCAmelCase_)
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_)
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)
lowercase_ = tokenizer.tokenize("""This is a test""")
self.assertListEqual(lowerCAmelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase_) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] , )
lowercase_ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowercase_ = tokenizer.convert_tokens_to_ids(lowerCAmelCase_)
self.assertListEqual(
lowerCAmelCase_ , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] , )
lowercase_ = tokenizer.convert_ids_to_tokens(lowerCAmelCase_)
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
return BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""")
@slow
def _UpperCAmelCase ( self : str):
"""simple docstring"""
lowercase_ = """Hello World!"""
lowercase_ = [6_5, 1_8_5_3_6, 2_2_6_0, 1_0_1, 6_6]
self.assertListEqual(lowerCAmelCase_ , self.big_tokenizer.encode(lowerCAmelCase_))
@slow
def _UpperCAmelCase ( self : str):
"""simple docstring"""
lowercase_ = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
# fmt: off
lowercase_ = [6_5, 8_7_1, 4_1_9, 3_5_8, 9_4_6, 9_9_1, 2_5_2_1, 4_5_2, 3_5_8, 1_3_5_7, 3_8_7, 7_7_5_1, 3_5_3_6, 1_1_2, 9_8_5, 4_5_6, 1_2_6, 8_6_5, 9_3_8, 5_4_0_0, 5_7_3_4, 4_5_8, 1_3_6_8, 4_6_7, 7_8_6, 2_4_6_2, 5_2_4_6, 1_1_5_9, 6_3_3, 8_6_5, 4_5_1_9, 4_5_7, 5_8_2, 8_5_2, 2_5_5_7, 4_2_7, 9_1_6, 5_0_8, 4_0_5, 3_4_3_2_4, 4_9_7, 3_9_1, 4_0_8, 1_1_3_4_2, 1_2_4_4, 3_8_5, 1_0_0, 9_3_8, 9_8_5, 4_5_6, 5_7_4, 3_6_2, 1_2_5_9_7, 3_2_0_0, 3_1_2_9, 1_1_7_2, 6_6] # noqa: E231
# fmt: on
self.assertListEqual(lowerCAmelCase_ , self.big_tokenizer.encode(lowerCAmelCase_))
@require_torch
@slow
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
lowercase_ = list(self.big_tokenizer.get_vocab().keys())[:1_0]
lowercase_ = """ """.join(lowerCAmelCase_)
lowercase_ = self.big_tokenizer.encode_plus(lowerCAmelCase_ , return_tensors="""pt""" , return_token_type_ids=lowerCAmelCase_)
lowercase_ = self.big_tokenizer.batch_encode_plus(
[sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=lowerCAmelCase_)
lowercase_ = BigBirdConfig(attention_type="""original_full""")
lowercase_ = BigBirdModel(lowerCAmelCase_)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**lowerCAmelCase_)
model(**lowerCAmelCase_)
@slow
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
lowercase_ = BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""")
lowercase_ = tokenizer.decode(tokenizer("""Paris is the [MASK].""").input_ids)
self.assertTrue(decoded_text == """[CLS] Paris is the[MASK].[SEP]""")
@slow
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
lowercase_ = {"""input_ids""": [[6_5, 3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4, 6_6], [6_5, 4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 6_6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [6_5, 4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 6_6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase_ , model_name="""google/bigbird-roberta-base""" , revision="""215c99f1600e06f83acce68422f2035b2b5c3510""" , )
"""simple docstring"""
from __future__ import annotations
def p_series(nth_term: str, power: str) -> list[str]:
    '''Returns the P-Series 1, 1/2^p, 1/3^p, ..., 1/n^p as a list of strings.'''
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series
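# Added illustrative sketch (not part of the original module): the first term is
# rendered as "1" and later terms as "1 / n^p".
def _demo_p_series():
    assert p_series(5, 2) == ["1", "1 / 4", "1 / 9", "1 / 16", "1 / 25"]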
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
    print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
    print(p_series(nth_term, power))
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase ):
def __init__( self : List[str] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Tuple=1_3 , lowerCAmelCase_ : Any=7 , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : Tuple=9_9 , lowerCAmelCase_ : List[str]=3_2 , lowerCAmelCase_ : Any=5 , lowerCAmelCase_ : int=4 , lowerCAmelCase_ : Tuple=3_7 , lowerCAmelCase_ : Optional[Any]="gelu" , lowerCAmelCase_ : Optional[int]=0.1 , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : Any=5_1_2 , lowerCAmelCase_ : Any=1_6 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : List[str]=0.02 , lowerCAmelCase_ : Optional[int]=4 , ):
"""simple docstring"""
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = seq_length
lowercase_ = is_training
lowercase_ = use_attention_mask
lowercase_ = use_token_type_ids
lowercase_ = use_labels
lowercase_ = vocab_size
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_act
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = max_position_embeddings
lowercase_ = type_vocab_size
lowercase_ = type_sequence_label_size
lowercase_ = initializer_range
lowercase_ = num_choices
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
lowercase_ = None
if self.use_attention_mask:
lowercase_ = random_attention_mask([self.batch_size, self.seq_length])
lowercase_ = None
if self.use_token_type_ids:
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
lowercase_ = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
lowercase_ = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ , lowercase_ = config_and_inputs
lowercase_ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , unittest.TestCase ):
lowercase__ = True
lowercase__ = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)
@slow
def _UpperCAmelCase ( self : str):
"""simple docstring"""
for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
lowercase_ = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""")
lowercase_ = jnp.array([[0, 1, 2, 3, 4, 5]])
lowercase_ = model(lowerCAmelCase_)[0]
lowercase_ = 5_0_0_0_0
lowercase_ = (1, 6, vocab_size)
self.assertEqual(output.shape , lowerCAmelCase_)
lowercase_ = jnp.array(
[[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]])
self.assertTrue(jnp.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=1E-4))
"""simple docstring"""
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def _UpperCAmelCase ( self : str):
"""simple docstring"""
lowercase_ = Vector([1, 2, 3])
self.assertEqual(x.component(0) , 1)
self.assertEqual(x.component(2) , 3)
lowercase_ = Vector()
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
lowercase_ = Vector([0, 0, 0, 0, 0, 1])
self.assertEqual(str(lowerCAmelCase_) , """(0,0,0,0,0,1)""")
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ = Vector([1, 2, 3, 4])
self.assertEqual(len(lowerCAmelCase_) , 4)
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
lowercase_ = Vector([1, 2])
lowercase_ = Vector([1, 2, 3, 4, 5])
lowercase_ = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
lowercase_ = Vector([1, -1, 1, -1, 2, -3, 4, -5])
self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3)
self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3)
self.assertEqual(z.euclidean_length() , 0)
self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = Vector([1, 2, 3])
lowercase_ = Vector([1, 1, 1])
self.assertEqual((x + y).component(0) , 2)
self.assertEqual((x + y).component(1) , 3)
self.assertEqual((x + y).component(2) , 4)
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
lowercase_ = Vector([1, 2, 3])
lowercase_ = Vector([1, 1, 1])
self.assertEqual((x - y).component(0) , 0)
self.assertEqual((x - y).component(1) , 1)
self.assertEqual((x - y).component(2) , 2)
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ = Vector([1, 2, 3])
lowercase_ = Vector([2, -1, 4]) # for test of dot product
lowercase_ = Vector([1, -2, -1])
self.assertEqual(str(x * 3.0) , """(3.0,6.0,9.0)""")
self.assertEqual((a * b) , 0)
def _UpperCAmelCase ( self : int):
"""simple docstring"""
self.assertEqual(str(zero_vector(1_0)).count("""0""") , 1_0)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
self.assertEqual(str(unit_basis_vector(3 , 1)) , """(0,1,0)""")
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
lowercase_ = Vector([1, 2, 3])
lowercase_ = Vector([1, 0, 1])
self.assertEqual(str(axpy(2 , lowerCAmelCase_ , lowerCAmelCase_)) , """(3,4,7)""")
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
lowercase_ = Vector([1, 0, 0, 0, 0, 0])
lowercase_ = x.copy()
self.assertEqual(str(lowerCAmelCase_) , str(lowerCAmelCase_))
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = Vector([1, 0, 0])
x.change_component(0 , 0)
x.change_component(1 , 1)
self.assertEqual(str(lowerCAmelCase_) , """(0,1,0)""")
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
self.assertEqual("""|1,2,3|\n|2,4,5|\n|6,7,8|\n""" , str(lowerCAmelCase_))
def _UpperCAmelCase ( self : str):
"""simple docstring"""
lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
lowercase_ = [[-3, -1_4, -1_0], [-5, -1_0, -5], [-2, -1, 0]]
for x in range(a.height()):
for y in range(a.width()):
self.assertEqual(minors[x][y] , a.minor(lowerCAmelCase_ , lowerCAmelCase_))
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
lowercase_ = [[-3, 1_4, -1_0], [5, -1_0, 5], [-2, 1, 0]]
for x in range(a.height()):
for y in range(a.width()):
self.assertEqual(cofactors[x][y] , a.cofactor(lowerCAmelCase_ , lowerCAmelCase_))
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
self.assertEqual(-5 , a.determinant())
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3)
lowercase_ = Vector([1, 2, 3])
self.assertEqual("""(14,32,50)""" , str(a * x))
self.assertEqual("""|2,4,6|\n|8,10,12|\n|14,16,18|\n""" , str(a * 2))
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
a.change_component(0 , 2 , 5)
self.assertEqual("""|1,2,5|\n|2,4,5|\n|6,7,8|\n""" , str(lowerCAmelCase_))
def _UpperCAmelCase ( self : str):
"""simple docstring"""
lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
self.assertEqual(7 , a.component(2 , 1) , 0.01)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
lowercase_ = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 1_0]] , 3 , 3)
self.assertEqual("""|2,4,10|\n|4,8,10|\n|12,14,18|\n""" , str(a + b))
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
lowercase_ = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 1_0]] , 3 , 3)
self.assertEqual("""|0,0,-4|\n|0,0,0|\n|0,0,-2|\n""" , str(a - b))
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
self.assertEqual(
"""|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n""" , str(square_zero_matrix(5)) , )
if __name__ == "__main__":
unittest.main()
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase ):
def __init__( self : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : int=3 , lowerCAmelCase_ : Tuple=3_2 , lowerCAmelCase_ : Any=3 , lowerCAmelCase_ : Dict=1_0 , lowerCAmelCase_ : Any=[1_0, 2_0, 3_0, 4_0] , lowerCAmelCase_ : int=[1, 1, 2, 1] , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : Optional[Any]="relu" , lowerCAmelCase_ : str=3 , lowerCAmelCase_ : int=None , ):
"""simple docstring"""
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = image_size
lowercase_ = num_channels
lowercase_ = embeddings_size
lowercase_ = hidden_sizes
lowercase_ = depths
lowercase_ = is_training
lowercase_ = use_labels
lowercase_ = hidden_act
lowercase_ = num_labels
lowercase_ = scope
lowercase_ = len(lowerCAmelCase_)
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
lowercase_ = self.get_config()
return config, pixel_values
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def _UpperCAmelCase ( self : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int]):
"""simple docstring"""
lowercase_ = FlaxRegNetModel(config=lowerCAmelCase_)
lowercase_ = model(lowerCAmelCase_)
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def _UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Union[str, Any]):
"""simple docstring"""
lowercase_ = self.num_labels
lowercase_ = FlaxRegNetForImageClassification(config=lowerCAmelCase_)
lowercase_ = model(lowerCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
lowercase_ = self.prepare_config_and_inputs()
lowercase_ , lowercase_ = config_and_inputs
lowercase_ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_flax
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , unittest.TestCase ):
lowercase__ = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
lowercase__ = False
lowercase__ = False
lowercase__ = False
def _UpperCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
lowercase_ = FlaxRegNetModelTester(self)
lowercase_ = ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_)
def _UpperCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _UpperCAmelCase ( self : str):
"""simple docstring"""
return
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_)
@unittest.skip(reason="""RegNet does not use inputs_embeds""")
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
pass
@unittest.skip(reason="""RegNet does not support input and output embeddings""")
def _UpperCAmelCase ( self : int):
"""simple docstring"""
pass
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = model_class(lowerCAmelCase_)
lowercase_ = inspect.signature(model.__call__)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ = [*signature.parameters.keys()]
lowercase_ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase_)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
def check_hidden_states_output(lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Any):
lowercase_ = model_class(lowerCAmelCase_)
lowercase_ = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_))
lowercase_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase_ = self.model_tester.num_stages
self.assertEqual(len(lowerCAmelCase_) , expected_num_stages + 1)
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
def _UpperCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
lowercase_ = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = model_class(lowerCAmelCase_)
@jax.jit
def model_jitted(lowerCAmelCase_ : Optional[int] , **lowerCAmelCase_ : Union[str, Any]):
return model(pixel_values=lowerCAmelCase_ , **lowerCAmelCase_)
with self.subTest("""JIT Enabled"""):
lowercase_ = model_jitted(**lowerCAmelCase_).to_tuple()
with self.subTest("""JIT Disabled"""):
with jax.disable_jit():
lowercase_ = model_jitted(**lowerCAmelCase_).to_tuple()
self.assertEqual(len(lowerCAmelCase_) , len(lowerCAmelCase_))
for jitted_output, output in zip(lowerCAmelCase_ , lowerCAmelCase_):
self.assertEqual(jitted_output.shape , output.shape)
def _SCREAMING_SNAKE_CASE () -> int:
'''simple docstring'''
lowercase_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
return AutoImageProcessor.from_pretrained("""facebook/regnet-y-040""") if is_vision_available() else None
@slow
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ = FlaxRegNetForImageClassification.from_pretrained("""facebook/regnet-y-040""")
lowercase_ = self.default_image_processor
lowercase_ = prepare_img()
lowercase_ = image_processor(images=lowerCAmelCase_ , return_tensors="""np""")
lowercase_ = model(**lowerCAmelCase_)
# verify the logits
lowercase_ = (1, 1_0_0_0)
self.assertEqual(outputs.logits.shape , lowerCAmelCase_)
lowercase_ = jnp.array([-0.4_180, -1.5_051, -3.4_836])
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1E-4))
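# Illustrative usage sketch (not part of the test suite; the checkpoint name is
# the one exercised above, everything else is an assumption for demonstration):
#   image_processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#   model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#   inputs = image_processor(images=Image.open("cat.png"), return_tensors="np")
#   logits = model(**inputs).logits  # shape (1, 1000), ImageNet-1k classes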
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Tuple:
'''simple docstring'''
lowercase_ = 0
if start < end:
lowercase_ = randint(__lowerCAmelCase , __lowerCAmelCase )
lowercase_ = a[end]
lowercase_ = a[pivot]
lowercase_ = temp
lowercase_ , lowercase_ = _in_place_partition(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
count += _in_place_quick_sort(__lowerCAmelCase , __lowerCAmelCase , p - 1 )
count += _in_place_quick_sort(__lowerCAmelCase , p + 1 , __lowerCAmelCase )
return count
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> int:
'''simple docstring'''
lowercase_ = 0
lowercase_ = randint(__lowerCAmelCase , __lowerCAmelCase )
lowercase_ = a[end]
lowercase_ = a[pivot]
lowercase_ = temp
lowercase_ = start - 1
for index in range(__lowerCAmelCase , __lowerCAmelCase ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
lowercase_ = new_pivot_index + 1
lowercase_ = a[new_pivot_index]
lowercase_ = a[index]
lowercase_ = temp
lowercase_ = a[new_pivot_index + 1]
lowercase_ = a[end]
lowercase_ = temp
return new_pivot_index + 1, count
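# Note: _in_place_partition above is a randomized Lomuto partition: a random
# pivot is swapped to a[end], elements smaller than it are compacted to the
# front, and the pivot is finally swapped into its sorted slot. Intended usage
# sketch (illustrative values, not part of the original script):
#   demo = [3, 1, 2]
#   _in_place_quick_sort(demo, 0, len(demo) - 1)  # sorts demo in place
#   assert demo == [1, 2, 3]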
UpperCAmelCase : Union[str, Any] = TemporaryFile()
UpperCAmelCase : Optional[int] = 100 # 100 elements are to be sorted
UpperCAmelCase , UpperCAmelCase : List[str] = 0, 1 # mean and standard deviation
UpperCAmelCase : Optional[Any] = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)
outfile.seek(0) # using the same array
UpperCAmelCase : List[str] = np.load(outfile)
UpperCAmelCase : List[Any] = len(M) - 1
UpperCAmelCase : Optional[int] = _in_place_quick_sort(M, 0, r)
print(
"No of Comparisons for 100 elements selected from a standard normal distribution"
"is :"
)
print(z)
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = "MCTCTFeatureExtractor"
lowercase__ = "AutoTokenizer"
def __init__( self : Tuple , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : int):
"""simple docstring"""
super().__init__(lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = self.feature_extractor
lowercase_ = False
def __call__( self : str , *lowerCAmelCase_ : Tuple , **lowerCAmelCase_ : List[str]):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*lowerCAmelCase_ , **lowerCAmelCase_)
if "raw_speech" in kwargs:
warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""")
lowercase_ = kwargs.pop("""raw_speech""")
else:
lowercase_ = kwargs.pop("""audio""" , lowerCAmelCase_)
lowercase_ = kwargs.pop("""sampling_rate""" , lowerCAmelCase_)
lowercase_ = kwargs.pop("""text""" , lowerCAmelCase_)
if len(lowerCAmelCase_) > 0:
lowercase_ = args[0]
lowercase_ = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""")
if audio is not None:
lowercase_ = self.feature_extractor(lowerCAmelCase_ , *lowerCAmelCase_ , sampling_rate=lowerCAmelCase_ , **lowerCAmelCase_)
if text is not None:
lowercase_ = self.tokenizer(lowerCAmelCase_ , **lowerCAmelCase_)
if text is None:
return inputs
elif audio is None:
return encodings
else:
lowercase_ = encodings["""input_ids"""]
return inputs
def _UpperCAmelCase ( self : Union[str, Any] , *lowerCAmelCase_ : int , **lowerCAmelCase_ : Any):
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase_ , **lowerCAmelCase_)
def _UpperCAmelCase ( self : Any , *lowerCAmelCase_ : Dict , **lowerCAmelCase_ : Any):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor.pad(*lowerCAmelCase_ , **lowerCAmelCase_)
lowercase_ = kwargs.pop("""input_features""" , lowerCAmelCase_)
lowercase_ = kwargs.pop("""labels""" , lowerCAmelCase_)
if len(lowerCAmelCase_) > 0:
lowercase_ = args[0]
lowercase_ = args[1:]
if input_features is not None:
lowercase_ = self.feature_extractor.pad(lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_)
if labels is not None:
lowercase_ = self.tokenizer.pad(lowerCAmelCase_ , **lowerCAmelCase_)
if labels is None:
return input_features
elif input_features is None:
return labels
else:
lowercase_ = labels["""input_ids"""]
return input_features
def _UpperCAmelCase ( self : Dict , *lowerCAmelCase_ : List[Any] , **lowerCAmelCase_ : Tuple):
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase_ , **lowerCAmelCase_)
@contextmanager
def _UpperCAmelCase ( self : int):
"""simple docstring"""
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your audio inputs, or in a separate call.""")
lowercase_ = True
lowercase_ = self.tokenizer
yield
lowercase_ = self.feature_extractor
lowercase_ = False
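# Minimal usage sketch for the processor class above (illustrative; the
# checkpoint name and `waveform` are assumptions, not part of this module):
#   processor = SCREAMING_SNAKE_CASE__.from_pretrained("speechbrain/m-ctc-t-large")
#   inputs = processor(audio=waveform, sampling_rate=16000, return_tensors="pt")
#   labels = processor(text="a transcription", return_tensors="pt")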
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase=False ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''module.blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''module.blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""module.cls_token""", """vit.embeddings.cls_token"""),
("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""module.pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""module.norm.weight""", """layernorm.weight"""),
("""module.norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowercase_ = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False ) -> Any:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
lowercase_ = """"""
else:
lowercase_ = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase_ = state_dict.pop(F'''module.blocks.{i}.attn.qkv.weight''' )
lowercase_ = state_dict.pop(F'''module.blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowercase_ = in_proj_weight[
: config.hidden_size, :
]
lowercase_ = in_proj_bias[: config.hidden_size]
lowercase_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase_ = in_proj_weight[
-config.hidden_size :, :
]
lowercase_ = in_proj_bias[-config.hidden_size :]
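# Shape note for read_in_q_k_v above (illustrative): with hidden size H, the
# fused attention projection weight has shape (3 * H, H). Rows [0, H) are the
# query weights, [H, 2 * H) the keys, and [2 * H, 3 * H) the values, exactly
# the three slices taken from in_proj_weight / in_proj_bias.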
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> List[str]:
'''simple docstring'''
lowercase_ = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> str:
'''simple docstring'''
lowercase_ = [
"""module.fc.fc1.weight""",
"""module.fc.fc1.bias""",
"""module.fc.bn1.weight""",
"""module.fc.bn1.bias""",
"""module.fc.bn1.running_mean""",
"""module.fc.bn1.running_var""",
"""module.fc.bn1.num_batches_tracked""",
"""module.fc.fc2.weight""",
"""module.fc.fc2.bias""",
"""module.fc.bn2.weight""",
"""module.fc.bn2.bias""",
"""module.fc.bn2.running_mean""",
"""module.fc.bn2.running_var""",
"""module.fc.bn2.num_batches_tracked""",
"""module.fc.fc3.weight""",
"""module.fc.fc3.bias""",
]
for k in ignore_keys:
state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Tuple:
'''simple docstring'''
lowercase_ = dct.pop(__lowerCAmelCase )
lowercase_ = val
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> Tuple:
'''simple docstring'''
lowercase_ = ViTMSNConfig()
lowercase_ = 10_00
lowercase_ = """datasets/huggingface/label-files"""
lowercase_ = """imagenet-1k-id2label.json"""
lowercase_ = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase ) , """r""" ) )
lowercase_ = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
lowercase_ = idalabel
lowercase_ = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
lowercase_ = 3_84
lowercase_ = 15_36
lowercase_ = 6
elif "l16" in checkpoint_url:
lowercase_ = 10_24
lowercase_ = 40_96
lowercase_ = 24
lowercase_ = 16
lowercase_ = 0.1
elif "b4" in checkpoint_url:
lowercase_ = 4
elif "l7" in checkpoint_url:
lowercase_ = 7
lowercase_ = 10_24
lowercase_ = 40_96
lowercase_ = 24
lowercase_ = 16
lowercase_ = 0.1
lowercase_ = ViTMSNModel(__lowerCAmelCase )
lowercase_ = torch.hub.load_state_dict_from_url(__lowerCAmelCase , map_location="""cpu""" )["""target_encoder"""]
lowercase_ = ViTImageProcessor(size=config.image_size )
remove_projection_head(__lowerCAmelCase )
lowercase_ = create_rename_keys(__lowerCAmelCase , base_model=__lowerCAmelCase )
for src, dest in rename_keys:
rename_key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
read_in_q_k_v(__lowerCAmelCase , __lowerCAmelCase , base_model=__lowerCAmelCase )
model.load_state_dict(__lowerCAmelCase )
model.eval()
lowercase_ = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowercase_ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw )
lowercase_ = ViTImageProcessor(
size=config.image_size , image_mean=__lowerCAmelCase , image_std=__lowerCAmelCase )
lowercase_ = image_processor(images=__lowerCAmelCase , return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
lowercase_ = model(**__lowerCAmelCase )
lowercase_ = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
lowercase_ = torch.tensor([[-1.0_915, -1.4_876, -1.1_809]] )
elif "b16" in checkpoint_url:
lowercase_ = torch.tensor([[14.2_889, -18.9_045, 11.7_281]] )
elif "l16" in checkpoint_url:
lowercase_ = torch.tensor([[41.5_028, -22.8_681, 45.6_475]] )
elif "b4" in checkpoint_url:
lowercase_ = torch.tensor([[-4.3_868, 5.2_932, -0.4_137]] )
else:
lowercase_ = torch.tensor([[-0.1_792, -0.6_465, 2.4_263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , __lowerCAmelCase , atol=1E-4 )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(__lowerCAmelCase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
UpperCAmelCase : Tuple = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
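# Example invocation (illustrative; the script filename is an assumption, the
# checkpoint URL is this script's default):
#   python convert_vit_msn_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#       --pytorch_dump_folder_path ./vit-msn-small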
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = ["input_features"]
def __init__( self : Optional[int] , lowerCAmelCase_ : Dict=8_0 , lowerCAmelCase_ : Optional[Any]=1_6_0_0_0 , lowerCAmelCase_ : List[str]=1_6_0 , lowerCAmelCase_ : List[str]=3_0 , lowerCAmelCase_ : List[str]=4_0_0 , lowerCAmelCase_ : Union[str, Any]=0.0 , lowerCAmelCase_ : Dict=False , **lowerCAmelCase_ : Union[str, Any] , ):
"""simple docstring"""
super().__init__(
feature_size=lowerCAmelCase_ , sampling_rate=lowerCAmelCase_ , padding_value=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , **lowerCAmelCase_ , )
lowercase_ = n_fft
lowercase_ = hop_length
lowercase_ = chunk_length
lowercase_ = chunk_length * sampling_rate
lowercase_ = self.n_samples // hop_length
lowercase_ = sampling_rate
lowercase_ = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=lowerCAmelCase_ , min_frequency=0.0 , max_frequency=8_000.0 , sampling_rate=lowerCAmelCase_ , norm="""slaney""" , mel_scale="""slaney""" , )
def _UpperCAmelCase ( self : Tuple , lowerCAmelCase_ : np.array):
"""simple docstring"""
lowercase_ = spectrogram(
lowerCAmelCase_ , window_function(self.n_fft , """hann""") , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="""log10""" , )
lowercase_ = log_spec[:, :-1]
lowercase_ = np.maximum(lowerCAmelCase_ , log_spec.max() - 8.0)
lowercase_ = (log_spec + 4.0) / 4.0
return log_spec
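    # Note on the method above: clipping to (max - 8.0) and then mapping through
    # (x + 4.0) / 4.0 is Whisper-style dynamic-range compression. Log-mel values
    # more than 8 log10 units below the per-example peak are floored, and the
    # result lands roughly in [-1, 1].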
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def _UpperCAmelCase ( lowerCAmelCase_ : List[np.ndarray] , lowerCAmelCase_ : List[np.ndarray] , lowerCAmelCase_ : float = 0.0):
"""simple docstring"""
if attention_mask is not None:
lowercase_ = np.array(lowerCAmelCase_ , np.intaa)
lowercase_ = []
for vector, length in zip(lowerCAmelCase_ , attention_mask.sum(-1)):
lowercase_ = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7)
if length < normed_slice.shape[0]:
lowercase_ = padding_value
normed_input_values.append(lowerCAmelCase_)
else:
lowercase_ = [(x - x.mean()) / np.sqrt(x.var() + 1E-7) for x in input_values]
return normed_input_values
def __call__( self : Union[str, Any] , lowerCAmelCase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[str] = "max_length" , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Optional[bool] = None , **lowerCAmelCase_ : Any , ):
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''')
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""")
lowercase_ = isinstance(lowerCAmelCase_ , np.ndarray) and len(raw_speech.shape) > 1
if is_batched_numpy and len(raw_speech.shape) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''')
lowercase_ = is_batched_numpy or (
isinstance(lowerCAmelCase_ , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list)))
)
if is_batched:
lowercase_ = [np.asarray([speech] , dtype=np.floataa).T for speech in raw_speech]
elif not is_batched and not isinstance(lowerCAmelCase_ , np.ndarray):
lowercase_ = np.asarray(lowerCAmelCase_ , dtype=np.floataa)
elif isinstance(lowerCAmelCase_ , np.ndarray) and raw_speech.dtype is np.dtype(np.floataa):
lowercase_ = raw_speech.astype(np.floataa)
# always return batch
if not is_batched:
lowercase_ = [np.asarray([raw_speech]).T]
lowercase_ = BatchFeature({"""input_features""": raw_speech})
# convert into correct format for padding
lowercase_ = self.pad(
lowerCAmelCase_ , padding=lowerCAmelCase_ , max_length=max_length if max_length else self.n_samples , truncation=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
lowercase_ = self.zero_mean_unit_var_norm(
padded_inputs["""input_features"""] , attention_mask=padded_inputs["""attention_mask"""] , padding_value=self.padding_value , )
lowercase_ = np.stack(padded_inputs["""input_features"""] , axis=0)
# make sure list is in array format
lowercase_ = padded_inputs.get("""input_features""").transpose(2 , 0 , 1)
lowercase_ = [self._np_extract_fbank_features(lowerCAmelCase_) for waveform in input_features[0]]
if isinstance(input_features[0] , lowerCAmelCase_):
lowercase_ = [np.asarray(lowerCAmelCase_ , dtype=np.floataa) for feature in input_features]
else:
lowercase_ = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
lowercase_ = padded_inputs["""attention_mask"""][:, :: self.hop_length]
if return_tensors is not None:
lowercase_ = padded_inputs.convert_to_tensors(lowerCAmelCase_)
return padded_inputs
def _UpperCAmelCase ( self : str):
"""simple docstring"""
lowercase_ = copy.deepcopy(self.__dict__)
lowercase_ = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
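# Minimal usage sketch for the feature extractor above (illustrative; assumes
# `waveform` is a 1-D mono float array sampled at 16 kHz):
#   fe = SCREAMING_SNAKE_CASE__()
#   features = fe(waveform, sampling_rate=16_000, return_tensors="np")
#   features["input_features"].shape  # (batch, num_mel_bins, num_frames)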
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
UpperCAmelCase : Tuple = logging.get_logger(__name__)
UpperCAmelCase : Optional[int] = {
"deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = "perceiver"
def __init__( self : Optional[int] , lowerCAmelCase_ : List[str]=2_5_6 , lowerCAmelCase_ : Dict=1_2_8_0 , lowerCAmelCase_ : List[Any]=7_6_8 , lowerCAmelCase_ : Optional[Any]=1 , lowerCAmelCase_ : List[Any]=2_6 , lowerCAmelCase_ : Optional[Any]=8 , lowerCAmelCase_ : Tuple=8 , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : Optional[Any]="kv" , lowerCAmelCase_ : Dict=1 , lowerCAmelCase_ : Optional[Any]=1 , lowerCAmelCase_ : List[str]="gelu" , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : Union[str, Any]=0.02 , lowerCAmelCase_ : List[Any]=1E-12 , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Union[str, Any]=2_6_2 , lowerCAmelCase_ : Union[str, Any]=2_0_4_8 , lowerCAmelCase_ : Any=5_6 , lowerCAmelCase_ : int=[3_6_8, 4_9_6] , lowerCAmelCase_ : Optional[int]=1_6 , lowerCAmelCase_ : Dict=1_9_2_0 , lowerCAmelCase_ : Optional[Any]=1_6 , lowerCAmelCase_ : Tuple=[1, 1_6, 2_2_4, 2_2_4] , **lowerCAmelCase_ : Union[str, Any] , ):
"""simple docstring"""
super().__init__(**lowerCAmelCase_)
lowercase_ = num_latents
lowercase_ = d_latents
lowercase_ = d_model
lowercase_ = num_blocks
lowercase_ = num_self_attends_per_block
lowercase_ = num_self_attention_heads
lowercase_ = num_cross_attention_heads
lowercase_ = qk_channels
lowercase_ = v_channels
lowercase_ = cross_attention_shape_for_attention
lowercase_ = self_attention_widening_factor
lowercase_ = cross_attention_widening_factor
lowercase_ = hidden_act
lowercase_ = attention_probs_dropout_prob
lowercase_ = initializer_range
lowercase_ = layer_norm_eps
lowercase_ = use_query_residual
# masked language modeling attributes
lowercase_ = vocab_size
lowercase_ = max_position_embeddings
# image classification attributes
lowercase_ = image_size
# flow attributes
lowercase_ = train_size
# multimodal autoencoding attributes
lowercase_ = num_frames
lowercase_ = audio_samples_per_frame
lowercase_ = samples_per_patch
lowercase_ = output_shape
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
@property
def _UpperCAmelCase ( self : str):
"""simple docstring"""
if self.task == "multiple-choice":
lowercase_ = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowercase_ = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""inputs""", dynamic_axis),
("""attention_mask""", dynamic_axis),
])
@property
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
return 1E-4
def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , lowerCAmelCase_ : int = -1 , lowerCAmelCase_ : int = -1 , lowerCAmelCase_ : int = -1 , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : Optional[TensorType] = None , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : int = 4_0 , lowerCAmelCase_ : int = 4_0 , ):
"""simple docstring"""
if isinstance(lowerCAmelCase_ , lowerCAmelCase_):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase_ = compute_effective_axis_dimension(
lowerCAmelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0)
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowercase_ = preprocessor.num_special_tokens_to_add(lowerCAmelCase_)
lowercase_ = compute_effective_axis_dimension(
lowerCAmelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCAmelCase_)
# Generate dummy inputs according to compute batch and sequence
lowercase_ = [""" """.join(["""a"""]) * seq_length] * batch_size
lowercase_ = dict(preprocessor(lowerCAmelCase_ , return_tensors=lowerCAmelCase_))
lowercase_ = inputs.pop("""input_ids""")
return inputs
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase_ = compute_effective_axis_dimension(lowerCAmelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch)
lowercase_ = self._generate_dummy_images(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = dict(preprocessor(images=lowerCAmelCase_ , return_tensors=lowerCAmelCase_))
lowercase_ = inputs.pop("""pixel_values""")
return inputs
else:
raise ValueError(
"""Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.""")
"""simple docstring"""
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
UpperCAmelCase : Optional[Any] = "CompVis/stable-diffusion-v1-1"
UpperCAmelCase : List[str] = "CompVis/stable-diffusion-v1-2"
UpperCAmelCase : str = "CompVis/stable-diffusion-v1-3"
UpperCAmelCase : Tuple = "CompVis/stable-diffusion-v1-4"
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
def __init__( self : Optional[int] , lowerCAmelCase_ : AutoencoderKL , lowerCAmelCase_ : CLIPTextModel , lowerCAmelCase_ : CLIPTokenizer , lowerCAmelCase_ : UNetaDConditionModel , lowerCAmelCase_ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , lowerCAmelCase_ : StableDiffusionSafetyChecker , lowerCAmelCase_ : CLIPImageProcessor , lowerCAmelCase_ : bool = True , ):
"""simple docstring"""
        super().__init__()
lowercase_ = StableDiffusionPipeline.from_pretrained(lowerCAmelCase_)
lowercase_ = StableDiffusionPipeline.from_pretrained(lowerCAmelCase_)
lowercase_ = StableDiffusionPipeline.from_pretrained(lowerCAmelCase_)
lowercase_ = StableDiffusionPipeline(
vae=lowerCAmelCase_ , text_encoder=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , safety_checker=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , requires_safety_checker=lowerCAmelCase_ , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea)
@property
def _UpperCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
return {k: getattr(self , lowerCAmelCase_) for k in self.config.keys() if not k.startswith("""_""")}
def _UpperCAmelCase ( self : int , lowerCAmelCase_ : Optional[Union[str, int]] = "auto"):
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowercase_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCAmelCase_)
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
self.enable_attention_slicing(lowerCAmelCase_)
@torch.no_grad()
def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : Union[str, List[str]] , lowerCAmelCase_ : int = 5_1_2 , lowerCAmelCase_ : int = 5_1_2 , lowerCAmelCase_ : int = 5_0 , lowerCAmelCase_ : float = 7.5 , lowerCAmelCase_ : Optional[Union[str, List[str]]] = None , lowerCAmelCase_ : Optional[int] = 1 , lowerCAmelCase_ : float = 0.0 , lowerCAmelCase_ : Optional[torch.Generator] = None , lowerCAmelCase_ : Optional[torch.FloatTensor] = None , lowerCAmelCase_ : Optional[str] = "pil" , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCAmelCase_ : int = 1 , **lowerCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
return self.pipea(
prompt=lowerCAmelCase_ , height=lowerCAmelCase_ , width=lowerCAmelCase_ , num_inference_steps=lowerCAmelCase_ , guidance_scale=lowerCAmelCase_ , negative_prompt=lowerCAmelCase_ , num_images_per_prompt=lowerCAmelCase_ , eta=lowerCAmelCase_ , generator=lowerCAmelCase_ , latents=lowerCAmelCase_ , output_type=lowerCAmelCase_ , return_dict=lowerCAmelCase_ , callback=lowerCAmelCase_ , callback_steps=lowerCAmelCase_ , **lowerCAmelCase_ , )
@torch.no_grad()
def _UpperCAmelCase ( self : Any , lowerCAmelCase_ : Union[str, List[str]] , lowerCAmelCase_ : int = 5_1_2 , lowerCAmelCase_ : int = 5_1_2 , lowerCAmelCase_ : int = 5_0 , lowerCAmelCase_ : float = 7.5 , lowerCAmelCase_ : Optional[Union[str, List[str]]] = None , lowerCAmelCase_ : Optional[int] = 1 , lowerCAmelCase_ : float = 0.0 , lowerCAmelCase_ : Optional[torch.Generator] = None , lowerCAmelCase_ : Optional[torch.FloatTensor] = None , lowerCAmelCase_ : Optional[str] = "pil" , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCAmelCase_ : int = 1 , **lowerCAmelCase_ : Tuple , ):
"""simple docstring"""
return self.pipea(
prompt=lowerCAmelCase_ , height=lowerCAmelCase_ , width=lowerCAmelCase_ , num_inference_steps=lowerCAmelCase_ , guidance_scale=lowerCAmelCase_ , negative_prompt=lowerCAmelCase_ , num_images_per_prompt=lowerCAmelCase_ , eta=lowerCAmelCase_ , generator=lowerCAmelCase_ , latents=lowerCAmelCase_ , output_type=lowerCAmelCase_ , return_dict=lowerCAmelCase_ , callback=lowerCAmelCase_ , callback_steps=lowerCAmelCase_ , **lowerCAmelCase_ , )
@torch.no_grad()
def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : Union[str, List[str]] , lowerCAmelCase_ : int = 5_1_2 , lowerCAmelCase_ : int = 5_1_2 , lowerCAmelCase_ : int = 5_0 , lowerCAmelCase_ : float = 7.5 , lowerCAmelCase_ : Optional[Union[str, List[str]]] = None , lowerCAmelCase_ : Optional[int] = 1 , lowerCAmelCase_ : float = 0.0 , lowerCAmelCase_ : Optional[torch.Generator] = None , lowerCAmelCase_ : Optional[torch.FloatTensor] = None , lowerCAmelCase_ : Optional[str] = "pil" , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCAmelCase_ : int = 1 , **lowerCAmelCase_ : str , ):
"""simple docstring"""
return self.pipea(
prompt=lowerCAmelCase_ , height=lowerCAmelCase_ , width=lowerCAmelCase_ , num_inference_steps=lowerCAmelCase_ , guidance_scale=lowerCAmelCase_ , negative_prompt=lowerCAmelCase_ , num_images_per_prompt=lowerCAmelCase_ , eta=lowerCAmelCase_ , generator=lowerCAmelCase_ , latents=lowerCAmelCase_ , output_type=lowerCAmelCase_ , return_dict=lowerCAmelCase_ , callback=lowerCAmelCase_ , callback_steps=lowerCAmelCase_ , **lowerCAmelCase_ , )
@torch.no_grad()
def _UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : Union[str, List[str]] , lowerCAmelCase_ : int = 5_1_2 , lowerCAmelCase_ : int = 5_1_2 , lowerCAmelCase_ : int = 5_0 , lowerCAmelCase_ : float = 7.5 , lowerCAmelCase_ : Optional[Union[str, List[str]]] = None , lowerCAmelCase_ : Optional[int] = 1 , lowerCAmelCase_ : float = 0.0 , lowerCAmelCase_ : Optional[torch.Generator] = None , lowerCAmelCase_ : Optional[torch.FloatTensor] = None , lowerCAmelCase_ : Optional[str] = "pil" , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCAmelCase_ : int = 1 , **lowerCAmelCase_ : List[str] , ):
"""simple docstring"""
return self.pipea(
prompt=lowerCAmelCase_ , height=lowerCAmelCase_ , width=lowerCAmelCase_ , num_inference_steps=lowerCAmelCase_ , guidance_scale=lowerCAmelCase_ , negative_prompt=lowerCAmelCase_ , num_images_per_prompt=lowerCAmelCase_ , eta=lowerCAmelCase_ , generator=lowerCAmelCase_ , latents=lowerCAmelCase_ , output_type=lowerCAmelCase_ , return_dict=lowerCAmelCase_ , callback=lowerCAmelCase_ , callback_steps=lowerCAmelCase_ , **lowerCAmelCase_ , )
@torch.no_grad()
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : Union[str, List[str]] , lowerCAmelCase_ : int = 5_1_2 , lowerCAmelCase_ : int = 5_1_2 , lowerCAmelCase_ : int = 5_0 , lowerCAmelCase_ : float = 7.5 , lowerCAmelCase_ : Optional[Union[str, List[str]]] = None , lowerCAmelCase_ : Optional[int] = 1 , lowerCAmelCase_ : float = 0.0 , lowerCAmelCase_ : Optional[torch.Generator] = None , lowerCAmelCase_ : Optional[torch.FloatTensor] = None , lowerCAmelCase_ : Optional[str] = "pil" , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCAmelCase_ : int = 1 , **lowerCAmelCase_ : List[Any] , ):
"""simple docstring"""
lowercase_ = """cuda""" if torch.cuda.is_available() else """cpu"""
self.to(lowerCAmelCase_)
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` must be divisible by 8 but are {height} and {width}.''')
# Get first result from Stable Diffusion Checkpoint v1.1
lowercase_ = self.textaimg_sda_a(
prompt=lowerCAmelCase_ , height=lowerCAmelCase_ , width=lowerCAmelCase_ , num_inference_steps=lowerCAmelCase_ , guidance_scale=lowerCAmelCase_ , negative_prompt=lowerCAmelCase_ , num_images_per_prompt=lowerCAmelCase_ , eta=lowerCAmelCase_ , generator=lowerCAmelCase_ , latents=lowerCAmelCase_ , output_type=lowerCAmelCase_ , return_dict=lowerCAmelCase_ , callback=lowerCAmelCase_ , callback_steps=lowerCAmelCase_ , **lowerCAmelCase_ , )
# Get first result from Stable Diffusion Checkpoint v1.2
lowercase_ = self.textaimg_sda_a(
prompt=lowerCAmelCase_ , height=lowerCAmelCase_ , width=lowerCAmelCase_ , num_inference_steps=lowerCAmelCase_ , guidance_scale=lowerCAmelCase_ , negative_prompt=lowerCAmelCase_ , num_images_per_prompt=lowerCAmelCase_ , eta=lowerCAmelCase_ , generator=lowerCAmelCase_ , latents=lowerCAmelCase_ , output_type=lowerCAmelCase_ , return_dict=lowerCAmelCase_ , callback=lowerCAmelCase_ , callback_steps=lowerCAmelCase_ , **lowerCAmelCase_ , )
# Get first result from Stable Diffusion Checkpoint v1.3
lowercase_ = self.textaimg_sda_a(
prompt=lowerCAmelCase_ , height=lowerCAmelCase_ , width=lowerCAmelCase_ , num_inference_steps=lowerCAmelCase_ , guidance_scale=lowerCAmelCase_ , negative_prompt=lowerCAmelCase_ , num_images_per_prompt=lowerCAmelCase_ , eta=lowerCAmelCase_ , generator=lowerCAmelCase_ , latents=lowerCAmelCase_ , output_type=lowerCAmelCase_ , return_dict=lowerCAmelCase_ , callback=lowerCAmelCase_ , callback_steps=lowerCAmelCase_ , **lowerCAmelCase_ , )
# Get first result from Stable Diffusion Checkpoint v1.4
lowercase_ = self.textaimg_sda_a(
prompt=lowerCAmelCase_ , height=lowerCAmelCase_ , width=lowerCAmelCase_ , num_inference_steps=lowerCAmelCase_ , guidance_scale=lowerCAmelCase_ , negative_prompt=lowerCAmelCase_ , num_images_per_prompt=lowerCAmelCase_ , eta=lowerCAmelCase_ , generator=lowerCAmelCase_ , latents=lowerCAmelCase_ , output_type=lowerCAmelCase_ , return_dict=lowerCAmelCase_ , callback=lowerCAmelCase_ , callback_steps=lowerCAmelCase_ , **lowerCAmelCase_ , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]])
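# Usage note: the final entry point above runs one prompt through the v1.1 to
# v1.4 checkpoints in sequence and returns a StableDiffusionPipelineOutput
# whose `images` list holds the first image produced by each checkpoint, in
# version order.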
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , unittest.TestCase ):
lowercase__ = BarthezTokenizer
lowercase__ = BarthezTokenizerFast
lowercase__ = True
lowercase__ = True
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
super().setUp()
lowercase_ = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""")
tokenizer.save_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname , legacy_format=lowerCAmelCase_)
lowercase_ = tokenizer
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
lowercase_ = """<pad>"""
lowercase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase_) , lowerCAmelCase_)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase_) , lowerCAmelCase_)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , """<s>""")
self.assertEqual(vocab_keys[1] , """<pad>""")
self.assertEqual(vocab_keys[-1] , """<mask>""")
self.assertEqual(len(lowerCAmelCase_) , 1_0_1_1_2_2)
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_1_1_2_2)
@require_torch
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
lowercase_ = [0, 5_7, 3_0_1_8, 7_0_3_0_7, 9_1, 2]
lowercase_ = self.tokenizer(
lowerCAmelCase_ , max_length=len(lowerCAmelCase_) , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors="""pt""")
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_)
self.assertEqual((2, 6) , batch.input_ids.shape)
self.assertEqual((2, 6) , batch.attention_mask.shape)
lowercase_ = batch.input_ids.tolist()[0]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_)
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
lowercase_ = self.get_tokenizer()
lowercase_ = self.get_rust_tokenizer()
lowercase_ = """I was born in 92000, and this is falsé."""
lowercase_ = tokenizer.tokenize(lowerCAmelCase_)
lowercase_ = rust_tokenizer.tokenize(lowerCAmelCase_)
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_)
lowercase_ = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_)
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = self.get_rust_tokenizer()
lowercase_ = tokenizer.encode(lowerCAmelCase_)
lowercase_ = rust_tokenizer.encode(lowerCAmelCase_)
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_)
@slow
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ = {"""input_ids""": [[0, 4_9_0, 1_4_3_2_8, 4_5_0_7, 3_5_4, 4_7, 4_3_6_6_9, 9_5, 2_5, 7_8_1_1_7, 2_0_2_1_5, 1_9_7_7_9, 1_9_0, 2_2, 4_0_0, 4, 3_5_3_4_3, 8_0_3_1_0, 6_0_3, 8_6, 2_4_9_3_7, 1_0_5, 3_3_4_3_8, 9_4_7_6_2, 1_9_6, 3_9_6_4_2, 7, 1_5, 1_5_9_3_3, 1_7_3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0_5_3_4, 8_7, 2_5, 6_6, 3_3_5_8, 1_9_6, 5_5_2_8_9, 8, 8_2_9_6_1, 8_1, 2_2_0_4, 7_5_2_0_3, 7, 1_5, 7_6_3, 1_2_9_5_6, 2_1_6, 1_7_8, 1_4_3_2_8, 9_5_9_5, 1_3_7_7, 6_9_6_9_3, 7, 4_4_8, 7_1_0_2_1, 1_9_6, 1_8_1_0_6, 1_4_3_7, 1_3_9_7_4, 1_0_8, 9_0_8_3, 4, 4_9_3_1_5, 7, 3_9, 8_6, 1_3_2_6, 2_7_9_3, 4_6_3_3_3, 4, 4_4_8, 1_9_6, 7_4_5_8_8, 7, 4_9_3_1_5, 7, 3_9, 2_1, 8_2_2, 3_8_4_7_0, 7_4, 2_1, 6_6_7_2_3, 6_2_4_8_0, 8, 2_2_0_5_0, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
lowercase_ = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase_ , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=lowerCAmelCase_ , )
"""simple docstring"""
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def _UpperCAmelCase ( self : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : List[str]):
"""simple docstring"""
        return F'''gaussian_noise_s={seed}_shape={"_".join([str(s) for s in shape])}.npy'''
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
super().tearDown()
gc.collect()
def _UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : Optional[int]=0 , lowerCAmelCase_ : int=(4, 4, 6_4, 6_4) , lowerCAmelCase_ : Any=False):
"""simple docstring"""
lowercase_ = jnp.bfloataa if fpaa else jnp.floataa
lowercase_ = jnp.array(load_hf_numpy(self.get_file_format(lowerCAmelCase_ , lowerCAmelCase_)) , dtype=lowerCAmelCase_)
return image
def _UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : Dict="CompVis/stable-diffusion-v1-4"):
"""simple docstring"""
lowercase_ = jnp.bfloataa if fpaa else jnp.floataa
lowercase_ = """bf16""" if fpaa else None
lowercase_ , lowercase_ = FlaxUNetaDConditionModel.from_pretrained(
lowerCAmelCase_ , subfolder="""unet""" , dtype=lowerCAmelCase_ , revision=lowerCAmelCase_)
return model, params
def _UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : str=0 , lowerCAmelCase_ : str=(4, 7_7, 7_6_8) , lowerCAmelCase_ : str=False):
"""simple docstring"""
lowercase_ = jnp.bfloataa if fpaa else jnp.floataa
lowercase_ = jnp.array(load_hf_numpy(self.get_file_format(lowerCAmelCase_ , lowerCAmelCase_)) , dtype=lowerCAmelCase_)
return hidden_states
@parameterized.expand(
[
# fmt: off
[8_3, 4, [-0.2_323, -0.1_304, 0.0_813, -0.3_093, -0.0_919, -0.1_571, -0.1_125, -0.5_806]],
[1_7, 0.55, [-0.0_831, -0.2_443, 0.0_901, -0.0_919, 0.3_396, 0.0_103, -0.3_743, 0.0_701]],
[8, 0.89, [-0.4_863, 0.0_859, 0.0_875, -0.1_658, 0.9_199, -0.0_114, 0.4_839, 0.4_639]],
[3, 1_0_0_0, [-0.5_649, 0.2_402, -0.5_518, 0.1_248, 1.1_328, -0.2_443, -0.0_325, -1.0_078]],
# fmt: on
])
def _UpperCAmelCase ( self : Dict , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str]):
"""simple docstring"""
lowercase_ , lowercase_ = self.get_unet_model(model_id="""CompVis/stable-diffusion-v1-4""" , fpaa=lowerCAmelCase_)
lowercase_ = self.get_latents(lowerCAmelCase_ , fpaa=lowerCAmelCase_)
lowercase_ = self.get_encoder_hidden_states(lowerCAmelCase_ , fpaa=lowerCAmelCase_)
lowercase_ = model.apply(
{"""params""": params} , lowerCAmelCase_ , jnp.array(lowerCAmelCase_ , dtype=jnp.intaa) , encoder_hidden_states=lowerCAmelCase_ , ).sample
assert sample.shape == latents.shape
lowercase_ = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())) , dtype=jnp.floataa)
lowercase_ = jnp.array(lowerCAmelCase_ , dtype=jnp.floataa)
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
assert jnp.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-2)
@parameterized.expand(
[
# fmt: off
[8_3, 4, [0.1_514, 0.0_807, 0.1_624, 0.1_016, -0.1_896, 0.0_263, 0.0_677, 0.2_310]],
[1_7, 0.55, [0.1_164, -0.0_216, 0.0_170, 0.1_589, -0.3_120, 0.1_005, -0.0_581, -0.1_458]],
[8, 0.89, [-0.1_758, -0.0_169, 0.1_004, -0.1_411, 0.1_312, 0.1_103, -0.1_996, 0.2_139]],
[3, 1_0_0_0, [0.1_214, 0.0_352, -0.0_731, -0.1_562, -0.0_994, -0.0_906, -0.2_340, -0.0_539]],
# fmt: on
])
def _UpperCAmelCase ( self : Any , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : str):
"""simple docstring"""
lowercase_ , lowercase_ = self.get_unet_model(model_id="""stabilityai/stable-diffusion-2""" , fpaa=lowerCAmelCase_)
lowercase_ = self.get_latents(lowerCAmelCase_ , shape=(4, 4, 9_6, 9_6) , fpaa=lowerCAmelCase_)
lowercase_ = self.get_encoder_hidden_states(lowerCAmelCase_ , shape=(4, 7_7, 1_0_2_4) , fpaa=lowerCAmelCase_)
lowercase_ = model.apply(
{"""params""": params} , lowerCAmelCase_ , jnp.array(lowerCAmelCase_ , dtype=jnp.intaa) , encoder_hidden_states=lowerCAmelCase_ , ).sample
assert sample.shape == latents.shape
lowercase_ = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())) , dtype=jnp.floataa)
lowercase_ = jnp.array(lowerCAmelCase_ , dtype=jnp.floataa)
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-2)
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCAmelCase : Optional[Any] = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class SCREAMING_SNAKE_CASE__ :
lowercase__ = PegasusConfig
lowercase__ = {}
lowercase__ = "gelu"
def __init__( self : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any]=1_3 , lowerCAmelCase_ : Any=7 , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : Tuple=False , lowerCAmelCase_ : str=9_9 , lowerCAmelCase_ : Tuple=3_2 , lowerCAmelCase_ : Dict=5 , lowerCAmelCase_ : Union[str, Any]=4 , lowerCAmelCase_ : Dict=3_7 , lowerCAmelCase_ : Any=0.1 , lowerCAmelCase_ : Any=0.1 , lowerCAmelCase_ : Optional[int]=2_0 , lowerCAmelCase_ : Tuple=2 , lowerCAmelCase_ : List[str]=1 , lowerCAmelCase_ : Optional[Any]=0 , ):
"""simple docstring"""
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = seq_length
lowercase_ = is_training
lowercase_ = use_labels
lowercase_ = vocab_size
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = max_position_embeddings
lowercase_ = eos_token_id
lowercase_ = pad_token_id
lowercase_ = bos_token_id
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
lowercase_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size).clip(3 , self.vocab_size)
lowercase_ = np.expand_dims(np.array([self.eos_token_id] * self.batch_size) , 1)
lowercase_ = np.concatenate([input_ids, eos_tensor] , axis=1)
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
lowercase_ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
lowercase_ = prepare_pegasus_inputs_dict(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
return config, inputs_dict
def _UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[Any]):
"""simple docstring"""
lowercase_ = 2_0
lowercase_ = model_class_name(lowerCAmelCase_)
lowercase_ = model.encode(inputs_dict["""input_ids"""])
lowercase_ , lowercase_ = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
lowercase_ = model.init_cache(decoder_input_ids.shape[0] , lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""")
lowercase_ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowercase_ = model.decode(
decoder_input_ids[:, :-1] , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ , decoder_position_ids=lowerCAmelCase_ , )
lowercase_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""")
lowercase_ = model.decode(
decoder_input_ids[:, -1:] , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowerCAmelCase_ , )
lowercase_ = model.decode(lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''')
def _UpperCAmelCase ( self : Optional[int] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict):
"""simple docstring"""
lowercase_ = 2_0
lowercase_ = model_class_name(lowerCAmelCase_)
lowercase_ = model.encode(inputs_dict["""input_ids"""])
lowercase_ , lowercase_ = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
lowercase_ = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
] , axis=-1 , )
lowercase_ = model.init_cache(decoder_input_ids.shape[0] , lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowercase_ = model.decode(
decoder_input_ids[:, :-1] , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ , decoder_position_ids=lowerCAmelCase_ , )
lowercase_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""")
lowercase_ = model.decode(
decoder_input_ids[:, -1:] , lowerCAmelCase_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowerCAmelCase_ , decoder_position_ids=lowerCAmelCase_ , )
lowercase_ = model.decode(lowerCAmelCase_ , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_)
lowercase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''')
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , ) -> Optional[Any]:
'''simple docstring'''
if attention_mask is None:
lowercase_ = np.not_equal(__lowerCAmelCase , config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
lowercase_ = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
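# Note on the helper above: the decoder attention mask keeps position 0 (the
# decoder start token) unmasked unconditionally and masks pad tokens everywhere
# else, which is why a column of ones is concatenated ahead of the
# pad-comparison over decoder_input_ids[:, 1:].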
@require_flax
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , unittest.TestCase ):
lowercase__ = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
lowercase__ = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
lowercase__ = True
lowercase__ = False
lowercase__ = False
lowercase__ = False
    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")

        src_text = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
        tgt_text = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
assert tgt_text == decoded
| 313
| 1
|
"""simple docstring"""
__all__ = [
    "DownloadConfig",
    "DownloadManager",
    "DownloadMode",
    "StreamingDownloadManager",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 313
|
"""simple docstring"""
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    download_desc: Optional[str] = None
    storage_options: Optional[Dict] = None

    def copy(self) -> "DownloadConfig":
        """Return a deep, independent copy of this configuration."""
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
| 313
| 1
|
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"good second issue",
"good difficult issue",
"enhancement",
"new pipeline/model",
"new scheduler",
"wip",
]
def main() -> None:
    """Close stale issues on huggingface/diffusers and re-open ones with fresh activity."""
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="""closed""" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="""open""" )
issue.remove_from_labels("""stale""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
issue.add_to_labels("""stale""" )
if __name__ == "__main__":
main()
| 313
|
"""simple docstring"""
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"allenai/led-base-16384": 1_6384,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace",
                 bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>",
                 pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        """The mask token. Logs an error and returns None if it has not been set."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Make the mask token behave like a normal word, i.e. include the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
| 313
| 1
|
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99,
                 hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37,
                 hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20,
                 eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates)
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
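
    # The two cache checks below exercise Flax incremental decoding: after priming the
    # key/value cache with all tokens but the last, decoding the final token must match
    # the logits of a single full-length decode to within 1e-3.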
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1)
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        # pad the decoder attention mask out to the full cache length
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1)
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None):
    """Build the inputs dict for the Pegasus tests, deriving attention masks from the pad token."""
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")

        src_text = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
        tgt_text = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
assert tgt_text == decoded
| 313
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 313
| 1
|
"""simple docstring"""
def odd_even_sort(input_list: list) -> list:
    """Sort the given list in place using odd-even transposition (brick) sort."""
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False

        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list
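

# Note: odd-even (brick) sort is a bubble-sort variant; each phase compares disjoint
# index pairs, so the swaps within one phase are independent and parallelize naturally.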
if __name__ == "__main__":
    print("Enter list to be sorted")
    input_list = [int(x) for x in input().split()]
    # inputing elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
| 313
|
"""simple docstring"""
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
                 use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, embedding_size=32,
                 num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
                 hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
                 type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3,
                 num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MegatronBertConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)
    def create_and_check_megatron_bert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels)
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": MegatronBertModel,
"fill-mask": MegatronBertForMaskedLM,
"question-answering": MegatronBertForQuestionAnswering,
"text-classification": MegatronBertForSequenceClassification,
"text-generation": MegatronBertForCausalLM,
"token-classification": MegatronBertForTokenClassification,
"zero-shot": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    @slow
    @unittest.skip("Model is not available.")
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)

        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        # Run in half precision, since the 345M-parameter checkpoint is stored in fp16.
        model.half()

        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)

        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
| 313
| 1
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        """Return the sum of all node values in the subtree rooted at `node`."""
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
if __name__ == "__main__":
import doctest
doctest.testmod()
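
    # Minimal usage sketch (hypothetical three-node tree): iterating the wrapper
    # yields a single value, the sum of every node reachable from the root.
    root = Node(10)
    root.left = Node(5)
    root.right = Node(-3)
    assert next(iter(BinaryTreeNodeSum(root))) == 12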
| 313
|
"""simple docstring"""
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Decompose a square matrix into L (unit lower triangular) and U (upper triangular)
    using Doolittle's method, so that table = L @ U."""
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
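
    # Quick sanity check on a small example: with Doolittle's method, L @ U must
    # reproduce the input matrix.
    example = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
    lower_mat, upper_mat = lower_upper_decomposition(example)
    assert np.allclose(lower_mat @ upper_mat, example)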
| 313
| 1
|
"""simple docstring"""
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    download_desc: Optional[str] = None
    storage_options: Optional[Dict] = None

    def copy(self) -> "DownloadConfig":
        """Return a deep, independent copy of this configuration."""
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
| 313
|
"""simple docstring"""
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    """Yield the Python/notebook files worth listing, pruning hidden and scripts dirs."""
    for dir_path, dir_names, filenames in os.walk(top_dir):
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i: int) -> str:
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")
if __name__ == "__main__":
print_directory_md(".")
| 313
| 1
|
"""simple docstring"""
import math
def prime_sieve(n: int) -> list:
    """Return all primes below `n` using an odd-only sieve of Eratosthenes."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes
def solution(limit: int = 999_966_663_333) -> int:
    """Sum all semidivisible numbers not exceeding `limit` (Project Euler 234)."""
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
if __name__ == "__main__":
print(solution())
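
    # Sanity check of the sieve helper on a small bound.
    assert prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]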
| 313
|
"""simple docstring"""
def bead_sort(sequence: list) -> list:
    """Sort a list of non-negative integers in place using bead (gravity) sort."""
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                # let the excess beads "fall" from the upper rod onto the lower one
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
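    # Duplicates should be kept and ordered correctly as well.
    assert bead_sort([1, 2, 3, 2]) == [1, 2, 2, 3]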
| 313
| 1
|
"""simple docstring"""
def solution(n: int = 1000) -> int:
    """Count, among the first `n` expansions of sqrt(2)'s continued fraction, those
    whose numerator has more digits than the denominator (Project Euler 57)."""
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)
if __name__ == "__main__":
print(F"{solution() = }")
| 313
|
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Build the inputs dict for the Flax Blenderbot tests, deriving default masks from the pad token."""
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class FlaxBlenderbotModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99,
                 hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4,
                 hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 max_position_embeddings=32, eos_token_id=2, pad_token_id=1, bos_token_id=0, initializer_range=0.02):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotConfig(vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=False)
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
def _UpperCAmelCase ( self : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : str):
"""simple docstring"""
lowercase_ = 2_0
lowercase_ = model_class_name(lowerCAmelCase_)
lowercase_ = model.encode(inputs_dict["""input_ids"""])
lowercase_ , lowercase_ = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
lowercase_ = model.init_cache(decoder_input_ids.shape[0] , lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""")
lowercase_ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowercase_ = model.decode(
decoder_input_ids[:, :-1] , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ , decoder_position_ids=lowerCAmelCase_ , )
lowercase_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""")
lowercase_ = model.decode(
decoder_input_ids[:, -1:] , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowerCAmelCase_ , )
lowercase_ = model.decode(lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''')
def _UpperCAmelCase ( self : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[Any]):
"""simple docstring"""
lowercase_ = 2_0
lowercase_ = model_class_name(lowerCAmelCase_)
lowercase_ = model.encode(inputs_dict["""input_ids"""])
lowercase_ , lowercase_ = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
lowercase_ = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
] , axis=-1 , )
lowercase_ = model.init_cache(decoder_input_ids.shape[0] , lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowercase_ = model.decode(
decoder_input_ids[:, :-1] , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ , decoder_position_ids=lowerCAmelCase_ , )
lowercase_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""")
lowercase_ = model.decode(
decoder_input_ids[:, -1:] , lowerCAmelCase_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowerCAmelCase_ , decoder_position_ids=lowerCAmelCase_ , )
lowercase_ = model.decode(lowerCAmelCase_ , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_)
lowercase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''')
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    vocab_size = 99
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
lowercase_ = np.array(
[
[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
[6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
[5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
[7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
[8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
[5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding
[6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
[5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
[4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
[2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
[2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
[4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
[7_0, 7_0, 5_0, 9, 2_8, 0, 2],
] , dtype=np.intaa , )
lowercase_ = input_ids.shape[0]
lowercase_ = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ , lowercase_ , lowercase_ = self._get_config_and_data()
lowercase_ = FlaxBlenderbotForConditionalGeneration(lowerCAmelCase_)
lowercase_ = lm_model(input_ids=lowerCAmelCase_)
lowercase_ = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["""logits"""].shape , lowerCAmelCase_)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , )
lowercase_ = FlaxBlenderbotForConditionalGeneration(lowerCAmelCase_)
lowercase_ = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa)
lowercase_ = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa)
lowercase_ = lm_model(input_ids=lowerCAmelCase_ , decoder_input_ids=lowerCAmelCase_)
lowercase_ = (*summary.shape, config.vocab_size)
self.assertEqual(outputs["""logits"""].shape , lowerCAmelCase_)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa)
lowercase_ = shift_tokens_right(lowerCAmelCase_ , 1 , 2)
lowercase_ = np.equal(lowerCAmelCase_ , 1).astype(np.floataa).sum()
lowercase_ = np.equal(lowerCAmelCase_ , 1).astype(np.floataa).sum()
self.assertEqual(shifted.shape , input_ids.shape)
self.assertEqual(lowerCAmelCase_ , n_pad_before - 1)
self.assertTrue(np.equal(shifted[:, 0] , 2).all())
@require_flax
class SCREAMING_SNAKE_CASE__ ( FlaxModelTesterMixin , unittest.TestCase , FlaxGenerationTesterMixin ):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = FlaxBlenderbotModelTester(self)
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
def _UpperCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
lowercase_ = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = model_class(lowerCAmelCase_)
@jax.jit
def encode_jitted(lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any]=None , **lowerCAmelCase_ : str):
return model.encode(input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_)
with self.subTest("""JIT Enabled"""):
lowercase_ = encode_jitted(**lowerCAmelCase_).to_tuple()
with self.subTest("""JIT Disabled"""):
with jax.disable_jit():
lowercase_ = encode_jitted(**lowerCAmelCase_).to_tuple()
self.assertEqual(len(lowerCAmelCase_) , len(lowerCAmelCase_))
for jitted_output, output in zip(lowerCAmelCase_ , lowerCAmelCase_):
self.assertEqual(jitted_output.shape , output.shape)
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
lowercase_ = model_class(lowerCAmelCase_)
lowercase_ = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""])
lowercase_ = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[Any]):
return model.decode(
decoder_input_ids=lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , encoder_outputs=lowerCAmelCase_ , )
with self.subTest("""JIT Enabled"""):
lowercase_ = decode_jitted(**lowerCAmelCase_).to_tuple()
with self.subTest("""JIT Disabled"""):
with jax.disable_jit():
lowercase_ = decode_jitted(**lowerCAmelCase_).to_tuple()
self.assertEqual(len(lowerCAmelCase_) , len(lowerCAmelCase_))
for jitted_output, output in zip(lowerCAmelCase_ , lowerCAmelCase_):
self.assertEqual(jitted_output.shape , output.shape)
@slow
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
for model_class_name in self.all_model_classes:
lowercase_ = model_class_name.from_pretrained("""facebook/blenderbot-400M-distill""")
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
lowercase_ = np.ones((1, 1)) * model.config.eos_token_id
lowercase_ = model(lowerCAmelCase_)
self.assertIsNotNone(lowerCAmelCase_)
@unittest.skipUnless(jax_device != """cpu""" , """3B test too slow on CPU.""")
@slow
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = {"""num_beams""": 1, """early_stopping""": True, """min_length""": 1_5, """max_length""": 2_5}
lowercase_ = {"""skip_special_tokens""": True, """clean_up_tokenization_spaces""": True}
lowercase_ = FlaxBlenderbotForConditionalGeneration.from_pretrained("""facebook/blenderbot-3B""" , from_pt=lowerCAmelCase_)
lowercase_ = BlenderbotTokenizer.from_pretrained("""facebook/blenderbot-3B""")
lowercase_ = ["""Sam"""]
lowercase_ = tokenizer(lowerCAmelCase_ , return_tensors="""jax""")
lowercase_ = model.generate(**lowerCAmelCase_ , **lowerCAmelCase_)
lowercase_ = """Sam is a great name. It means \"sun\" in Gaelic."""
lowercase_ = tokenizer.batch_decode(lowerCAmelCase_ , **lowerCAmelCase_)
assert generated_txt[0].strip() == tgt_text
| 313
| 1
|
"""simple docstring"""
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig( datasets.BuilderConfig ):
    features = None
    encoding = "utf-8"
    encoding_errors = None
    field = None
    use_threads = True  # deprecated
    block_size = None  # deprecated
    chunksize = 10 << 20  # 10MB
    newlines_in_values = None
class SCREAMING_SNAKE_CASE__ ( datasets.ArrowBasedBuilder ):
    BUILDER_CONFIG_CLASS = JsonConfig
def _UpperCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
if self.config.block_size is not None:
logger.warning("""The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead""")
lowercase_ = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
"""The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore.""")
if self.config.newlines_in_values is not None:
raise ValueError("""The JSON loader parameter `newlines_in_values` is no longer supported""")
return datasets.DatasetInfo(features=self.config.features)
def _UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Union[str, Any]):
"""simple docstring"""
if not self.config.data_files:
raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''')
lowercase_ = dl_manager.download_and_extract(self.config.data_files)
if isinstance(lowerCAmelCase_ , (str, list, tuple)):
lowercase_ = data_files
if isinstance(lowerCAmelCase_ , lowerCAmelCase_):
lowercase_ = [files]
lowercase_ = [dl_manager.iter_files(lowerCAmelCase_) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files})]
lowercase_ = []
for split_name, files in data_files.items():
if isinstance(lowerCAmelCase_ , lowerCAmelCase_):
lowercase_ = [files]
lowercase_ = [dl_manager.iter_files(lowerCAmelCase_) for file in files]
splits.append(datasets.SplitGenerator(name=lowerCAmelCase_ , gen_kwargs={"""files""": files}))
return splits
def _UpperCAmelCase ( self : List[Any] , lowerCAmelCase_ : pa.Table):
"""simple docstring"""
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features) - set(pa_table.column_names):
lowercase_ = self.config.features.arrow_schema.field(lowerCAmelCase_).type
lowercase_ = pa_table.append_column(lowerCAmelCase_ , pa.array([None] * len(lowerCAmelCase_) , type=lowerCAmelCase_))
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
lowercase_ = table_cast(lowerCAmelCase_ , self.config.features.arrow_schema)
return pa_table
def _UpperCAmelCase ( self : Tuple , lowerCAmelCase_ : List[Any]):
"""simple docstring"""
for file_idx, file in enumerate(itertools.chain.from_iterable(lowerCAmelCase_)):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(lowerCAmelCase_ , encoding=self.config.encoding , errors=self.config.encoding_errors) as f:
lowercase_ = json.load(lowerCAmelCase_)
# We keep only the field we are interested in
lowercase_ = dataset[self.config.field]
                # We accept two formats: a list of dicts or a dict of lists
if isinstance(lowerCAmelCase_ , (list, tuple)):
lowercase_ = set().union(*[row.keys() for row in dataset])
lowercase_ = {col: [row.get(lowerCAmelCase_) for row in dataset] for col in keys}
else:
lowercase_ = dataset
lowercase_ = pa.Table.from_pydict(lowerCAmelCase_)
yield file_idx, self._cast_table(lowerCAmelCase_)
# If the file has one json object per line
else:
with open(lowerCAmelCase_ , """rb""") as f:
lowercase_ = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
lowercase_ = max(self.config.chunksize // 3_2 , 1_6 << 1_0)
lowercase_ = (
self.config.encoding_errors if self.config.encoding_errors is not None else """strict"""
)
while True:
lowercase_ = f.read(self.config.chunksize)
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(lowerCAmelCase_)
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
lowercase_ = batch.decode(self.config.encoding , errors=lowerCAmelCase_).encode("""utf-8""")
try:
while True:
try:
lowercase_ = paj.read_json(
io.BytesIO(lowerCAmelCase_) , read_options=paj.ReadOptions(block_size=lowerCAmelCase_))
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(lowerCAmelCase_ , pa.ArrowInvalid)
and "straddling" not in str(lowerCAmelCase_)
or block_size > len(lowerCAmelCase_)
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
F'''Batch of {len(lowerCAmelCase_)} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.''')
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
lowerCAmelCase_ , encoding=self.config.encoding , errors=self.config.encoding_errors) as f:
lowercase_ = json.load(lowerCAmelCase_)
except json.JSONDecodeError:
logger.error(F'''Failed to read file \'{file}\' with error {type(lowerCAmelCase_)}: {e}''')
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(lowerCAmelCase_ , lowerCAmelCase_): # list is the only sequence type supported in JSON
try:
lowercase_ = set().union(*[row.keys() for row in dataset])
lowercase_ = {col: [row.get(lowerCAmelCase_) for row in dataset] for col in keys}
lowercase_ = pa.Table.from_pydict(lowerCAmelCase_)
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(F'''Failed to read file \'{file}\' with error {type(lowerCAmelCase_)}: {e}''')
raise ValueError(F'''Not able to read records in the JSON file at {file}.''') from None
yield file_idx, self._cast_table(lowerCAmelCase_)
break
else:
logger.error(F'''Failed to read file \'{file}\' with error {type(lowerCAmelCase_)}: {e}''')
raise ValueError(
F'''Not able to read records in the JSON file at {file}. '''
F'''You should probably indicate the field of the JSON file containing your records. '''
F'''This JSON file contain the following fields: {str(list(dataset.keys()))}. '''
F'''Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ''') from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(lowerCAmelCase_)
batch_idx += 1
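# Standalone sketch of the chunked JSON-lines strategy implemented in the
# method above; the byte payload and block size below are illustrative
# assumptions rather than values taken from this builder.
def _read_jsonl_with_retry(raw_bytes: bytes, block_size: int = 16 << 10) -> pa.Table:
    # Double the block size whenever a JSON object straddles a block boundary,
    # mirroring the retry loop above.
    while True:
        try:
            return paj.read_json(io.BytesIO(raw_bytes), read_options=paj.ReadOptions(block_size=block_size))
        except pa.ArrowInvalid:
            if block_size > len(raw_bytes):
                raise
            block_size *= 2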
| 313
|
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Optional[MinHash]:
'''simple docstring'''
if len(__lowerCAmelCase ) < MIN_NUM_TOKENS:
return None
lowercase_ = MinHash(num_perm=__lowerCAmelCase )
for token in set(__lowerCAmelCase ):
min_hash.update(token.encode() )
return min_hash
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Set[str]:
'''simple docstring'''
return {t for t in NON_ALPHA.split(__lowerCAmelCase ) if len(t.strip() ) > 0}
class DuplicationIndex:
def __init__( self : List[str] , *,
lowerCAmelCase_ : float = 0.85 , ):
"""simple docstring"""
lowercase_ = duplication_jaccard_threshold
lowercase_ = NUM_PERM
lowercase_ = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm)
lowercase_ = defaultdict(lowerCAmelCase_)
def _UpperCAmelCase ( self : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : MinHash):
"""simple docstring"""
lowercase_ = self._index.query(lowerCAmelCase_)
if code_key in self._index.keys:
print(F'''Duplicate key {code_key}''')
return
self._index.insert(lowerCAmelCase_ , lowerCAmelCase_)
if len(lowerCAmelCase_) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(lowerCAmelCase_)
break
else:
self._duplicate_clusters[close_duplicates[0]].add(lowerCAmelCase_)
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
lowercase_ = []
for base, duplicates in self._duplicate_clusters.items():
lowercase_ = [base] + list(lowerCAmelCase_)
# reformat the cluster to be a list of dict
lowercase_ = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster]
duplicate_clusters.append(lowerCAmelCase_)
return duplicate_clusters
def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : List[str]):
"""simple docstring"""
lowercase_ = self.get_duplicate_clusters()
with open(lowerCAmelCase_ , """w""") as f:
json.dump(lowerCAmelCase_ , lowerCAmelCase_)
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> str:
'''simple docstring'''
lowercase_ , lowercase_ = element
lowercase_ = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> List[str]:
'''simple docstring'''
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(__lowerCAmelCase , max_queue_size=1_00_00 ) , chunksize=1_00 , ):
if data is not None:
yield data
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ = DuplicationIndex(duplication_jaccard_threshold=__lowerCAmelCase )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(__lowerCAmelCase ) ) , max_queue_size=1_00 ) ):
di.add(__lowerCAmelCase , __lowerCAmelCase )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> float:
'''simple docstring'''
lowercase_ = get_tokens(__lowerCAmelCase )
lowercase_ = get_tokens(__lowerCAmelCase )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
UpperCAmelCase : Optional[Any] = None
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = []
for elementa in cluster:
lowercase_ = _shared_dataset[elementa["""base_index"""]]["""content"""]
for elementa in extremes:
lowercase_ = _shared_dataset[elementa["""base_index"""]]["""content"""]
if jaccard_similarity(__lowerCAmelCase , __lowerCAmelCase ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
lowercase_ = 1
extremes.append(__lowerCAmelCase )
return extremes
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
global _shared_dataset
lowercase_ = dataset
lowercase_ = []
lowercase_ = partial(_find_cluster_extremes_shared , jaccard_threshold=__lowerCAmelCase )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
__lowerCAmelCase , __lowerCAmelCase , ) , total=len(__lowerCAmelCase ) , ):
extremes_list.append(__lowerCAmelCase )
return extremes_list
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase = 0.85 ) -> Tuple[Type[Dataset], List[List[Dict]]]:
'''simple docstring'''
lowercase_ = make_duplicate_clusters(__lowerCAmelCase , __lowerCAmelCase )
lowercase_ = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster}
lowercase_ = {}
lowercase_ = find_extremes(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
for extremes in extremes_clusters:
for element in extremes:
lowercase_ = element
lowercase_ = duplicate_indices - set(extreme_dict.keys() )
    ds_filter = dataset.filter(lambda example , idx : idx not in remove_indices , with_indices=True )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
lowercase_ = element["""base_index"""] in extreme_dict
if element["is_extreme"]:
lowercase_ = extreme_dict[element["""base_index"""]]["""copies"""]
print(F'''Original dataset size: {len(__lowerCAmelCase )}''' )
print(F'''Number of duplicate clusters: {len(__lowerCAmelCase )}''' )
print(F'''Files in duplicate cluster: {len(__lowerCAmelCase )}''' )
print(F'''Unique files in duplicate cluster: {len(__lowerCAmelCase )}''' )
print(F'''Filtered dataset size: {len(__lowerCAmelCase )}''' )
return ds_filter, duplicate_clusters
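# Worked example of the token-level Jaccard similarity used for near-duplicate
# detection above (the two code strings are purely illustrative):
_demo_a = {t for t in NON_ALPHA.split("def foo(): pass") if len(t.strip()) > 0}
_demo_b = {t for t in NON_ALPHA.split("def bar(): pass") if len(t.strip()) > 0}
# {"def", "pass"} is shared out of {"def", "foo", "bar", "pass"}: 2 / 4 = 0.5,
# below the default 0.85 threshold, so these two would not be clustered.
assert len(_demo_a & _demo_b) / len(_demo_a | _demo_b) == 0.5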
| 313
| 1
|
"""simple docstring"""
UpperCAmelCase : str = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
UpperCAmelCase : Optional[Any] = [{"type": "code", "content": INSTALL_CONTENT}]
UpperCAmelCase : Tuple = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 313
|
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
UpperCAmelCase : Union[str, Any] = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
for pegasus_name, hf_name in PATTERNS:
lowercase_ = k.replace(__lowerCAmelCase , __lowerCAmelCase )
return k
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> PegasusForConditionalGeneration:
'''simple docstring'''
lowercase_ = DEFAULTS.copy()
cfg_kwargs.update(__lowerCAmelCase )
lowercase_ = PegasusConfig(**__lowerCAmelCase )
lowercase_ = PegasusForConditionalGeneration(__lowerCAmelCase )
lowercase_ = torch_model.model.state_dict()
lowercase_ = {}
for k, v in tf_weights.items():
lowercase_ = rename_state_dict_key(__lowerCAmelCase )
if new_k not in sd:
raise ValueError(F'''could not find new key {new_k} in state dict. (converted from {k})''' )
if "dense" in k or "proj" in new_k:
lowercase_ = v.T
lowercase_ = torch.tensor(__lowerCAmelCase , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, F'''{new_k}, {k}, {v.shape}, {sd[new_k].shape}'''
# make sure embedding.padding_idx is respected
lowercase_ = torch.zeros_like(mapping["""shared.weight"""][cfg.pad_token_id + 1] )
lowercase_ = mapping["""shared.weight"""]
lowercase_ = mapping["""shared.weight"""]
lowercase_ = {k: torch.zeros_like(__lowerCAmelCase ) for k, v in sd.items() if k.endswith("""bias""" ) and k not in mapping}
mapping.update(**__lowerCAmelCase )
lowercase_ , lowercase_ = torch_model.model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase )
lowercase_ = [
k for k in missing if k not in ["""encoder.embed_positions.weight""", """decoder.embed_positions.weight"""]
]
assert unexpected_missing == [], F'''no matches found for the following torch keys {unexpected_missing}'''
assert extra == [], F'''no matches found for the following tf keys {extra}'''
return torch_model
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
'''simple docstring'''
lowercase_ = tf.train.list_variables(__lowerCAmelCase )
lowercase_ = {}
lowercase_ = ["""Adafactor""", """global_step"""]
for name, shape in tqdm(__lowerCAmelCase , desc="""converting tf checkpoint to dict""" ):
lowercase_ = any(pat in name for pat in ignore_name )
if skip_key:
continue
lowercase_ = tf.train.load_variable(__lowerCAmelCase , __lowerCAmelCase )
lowercase_ = array
return tf_weights
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> str:
'''simple docstring'''
lowercase_ = Path(__lowerCAmelCase ).parent.name
lowercase_ = task_specific_params[F'''summarization_{dataset}''']["""max_position_embeddings"""]
lowercase_ = PegasusTokenizer.from_pretrained("""sshleifer/pegasus""" , model_max_length=__lowerCAmelCase )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(__lowerCAmelCase )
# convert model
lowercase_ = get_tf_weights_as_numpy(__lowerCAmelCase )
lowercase_ = task_specific_params[F'''summarization_{dataset}''']
if dataset == "large":
lowercase_ = task_specific_params
lowercase_ = convert_pegasus(__lowerCAmelCase , __lowerCAmelCase )
torch_model.save_pretrained(__lowerCAmelCase )
lowercase_ = torch_model.state_dict()
sd.pop("""model.decoder.embed_positions.weight""" )
sd.pop("""model.encoder.embed_positions.weight""" )
torch.save(__lowerCAmelCase , Path(__lowerCAmelCase ) / """pytorch_model.bin""" )
if __name__ == "__main__":
UpperCAmelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
UpperCAmelCase : List[Any] = parser.parse_args()
if args.save_dir is None:
UpperCAmelCase : List[str] = Path(args.tf_ckpt_path).parent.name
UpperCAmelCase : int = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
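    # Illustrative trace of the pattern-based key renaming performed by
    # rename_state_dict_key above; the TF variable name is hypothetical and only
    # the relevant substitution pairs are restated locally:
    demo_patterns = [["memory_attention", "encoder_attn"], ["/", "."], ["output_proj", "out_proj"], ["kernel", "weight"]]
    demo_key = "encoder/memory_attention/output_proj/kernel"
    for old, new in demo_patterns:
        demo_key = demo_key.replace(old, new)
    assert demo_key == "encoder.encoder_attn.out_proj.weight"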
| 313
| 1
|
"""simple docstring"""
from maths.prime_check import is_prime
def _SCREAMING_SNAKE_CASE (number: int ) -> int:
    '''simple docstring'''
    if not isinstance(number , int ):
        error_message = F'''Input value of [number={number}] must be an integer'''
        raise TypeError(error_message )
    if is_prime(number ) and is_prime(number + 2 ):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
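# Worked examples for the twin-prime check above (assuming the usual behaviour
# of maths.prime_check.is_prime): 3 -> 5 since 3 and 5 are both prime, 4 -> -1
# since 4 is not prime, and 7 -> -1 since 9 = 7 + 2 is composite.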
| 313
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    '''simple docstring'''
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime , None )
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1
def solution(limit: float = 1E10 ) -> int:
    '''simple docstring'''
    primes = sieve()
    n = 1
    while True:
        prime = next(primes )
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes )
        n += 2
if __name__ == "__main__":
print(solution())
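# Background sketch for the loop above (hedged; this follows the usual analysis
# of Project Euler 123): by the binomial theorem,
# ((p - 1)**n + (p + 1)**n) % p**2 == (2 * n * p) % p**2 for odd n and == 2 for
# even n, so only odd n need inspecting and the search can stop once
# 2 * prime * n exceeds the limit. A tiny numeric check with p = 5, n = 3:
assert ((5 - 1) ** 3 + (5 + 1) ** 3) % (5 * 5) == (2 * 3 * 5) % (5 * 5)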
| 313
| 1
|
"""simple docstring"""
from __future__ import annotations
def p_series(nth_term , power ) -> list[str]:
    '''simple docstring'''
    if nth_term == "":
        return [""]
    nth_term = int(nth_term )
    power = int(power )
    series: list[str] = []
    for temp in range(int(nth_term ) ):
        series.append(F'''1 / {pow(temp + 1 , int(power ) )}''' if series else """1""" )
    return series
if __name__ == "__main__":
import doctest
doctest.testmod()
    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
print(p_series(nth_term, power))
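# For example, p_series(5, 2) returns ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25'],
# the first five terms of the P-series with p = 2.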
| 313
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Any , lowerCAmelCase_ : int = 6):
"""simple docstring"""
lowercase_ = None
lowercase_ = None
self.create_linked_list(lowerCAmelCase_)
def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : int):
"""simple docstring"""
lowercase_ = Node()
lowercase_ = current_node
lowercase_ = current_node
lowercase_ = current_node
for _ in range(1 , lowerCAmelCase_):
lowercase_ = Node()
lowercase_ = current_node
lowercase_ = previous_node
lowercase_ = current_node
lowercase_ = self.front
lowercase_ = previous_node
def _UpperCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
return (
self.front == self.rear
and self.front is not None
and self.front.data is None
)
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
self.check_can_perform_operation()
return self.front.data if self.front else None
def _UpperCAmelCase ( self : int , lowerCAmelCase_ : Any):
"""simple docstring"""
if self.rear is None:
return
self.check_is_full()
if not self.is_empty():
lowercase_ = self.rear.next
if self.rear:
lowercase_ = data
def _UpperCAmelCase ( self : str):
"""simple docstring"""
self.check_can_perform_operation()
if self.rear is None or self.front is None:
return None
if self.front == self.rear:
lowercase_ = self.front.data
lowercase_ = None
return data
lowercase_ = self.front
lowercase_ = old_front.next
lowercase_ = old_front.data
lowercase_ = None
return data
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
if self.is_empty():
raise Exception("""Empty Queue""")
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
if self.rear and self.rear.next == self.front:
raise Exception("""Full Queue""")
class Node:
def __init__( self : List[str]):
"""simple docstring"""
lowercase_ = None
lowercase_ = None
lowercase_ = None
if __name__ == "__main__":
import doctest
doctest.testmod()
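# Usage sketch for the circular queue above. The class and method names in this
# dump are placeholders, so the conventional names (CircularQueueLinkedList,
# enqueue, dequeue, is_empty) are assumed here purely for illustration:
#   queue = CircularQueueLinkedList(2)
#   queue.is_empty()   # True
#   queue.enqueue("a")
#   queue.dequeue()    # "a"; dequeuing again raises Exception("Empty Queue")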
| 313
| 1
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( PipelineTesterMixin , unittest.TestCase ):
lowercase__ = KandinskyImgaImgPipeline
lowercase__ = ["prompt", "image_embeds", "negative_image_embeds", "image"]
lowercase__ = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
]
lowercase__ = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
lowercase__ = False
@property
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
return 3_2
@property
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
return 3_2
@property
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
return self.time_input_dim
@property
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
return self.time_input_dim * 4
@property
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
return 1_0_0
@property
def _UpperCAmelCase ( self : str):
"""simple docstring"""
lowercase_ = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""")
return tokenizer
@property
def _UpperCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
torch.manual_seed(0)
lowercase_ = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_0_0_5 , )
lowercase_ = MultilingualCLIP(lowerCAmelCase_)
lowercase_ = text_encoder.eval()
return text_encoder
@property
def _UpperCAmelCase ( self : int):
"""simple docstring"""
torch.manual_seed(0)
lowercase_ = {
"""in_channels""": 4,
            # Out channels are double the in channels because the model predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
lowercase_ = UNetaDConditionModel(**lowerCAmelCase_)
return model
@property
def _UpperCAmelCase ( self : str):
"""simple docstring"""
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
torch.manual_seed(0)
lowercase_ = VQModel(**self.dummy_movq_kwargs)
return model
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
lowercase_ = self.dummy_text_encoder
lowercase_ = self.dummy_tokenizer
lowercase_ = self.dummy_unet
lowercase_ = self.dummy_movq
lowercase_ = {
"""num_train_timesteps""": 1_0_0_0,
"""beta_schedule""": """linear""",
"""beta_start""": 0.00_085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
lowercase_ = DDIMScheduler(**lowerCAmelCase_)
lowercase_ = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def _UpperCAmelCase ( self : Dict , lowerCAmelCase_ : int , lowerCAmelCase_ : Union[str, Any]=0):
"""simple docstring"""
lowercase_ = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(lowerCAmelCase_)).to(lowerCAmelCase_)
lowercase_ = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1)).to(lowerCAmelCase_)
# create init_image
lowercase_ = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(lowerCAmelCase_)).to(lowerCAmelCase_)
lowercase_ = image.cpu().permute(0 , 2 , 3 , 1)[0]
lowercase_ = Image.fromarray(np.uinta(lowerCAmelCase_)).convert("""RGB""").resize((2_5_6, 2_5_6))
if str(lowerCAmelCase_).startswith("""mps"""):
lowercase_ = torch.manual_seed(lowerCAmelCase_)
else:
lowercase_ = torch.Generator(device=lowerCAmelCase_).manual_seed(lowerCAmelCase_)
lowercase_ = {
"""prompt""": """horse""",
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 6_4,
"""width""": 6_4,
"""num_inference_steps""": 1_0,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
lowercase_ = """cpu"""
lowercase_ = self.get_dummy_components()
lowercase_ = self.pipeline_class(**lowerCAmelCase_)
lowercase_ = pipe.to(lowerCAmelCase_)
pipe.set_progress_bar_config(disable=lowerCAmelCase_)
lowercase_ = pipe(**self.get_dummy_inputs(lowerCAmelCase_))
lowercase_ = output.images
lowercase_ = pipe(
**self.get_dummy_inputs(lowerCAmelCase_) , return_dict=lowerCAmelCase_ , )[0]
lowercase_ = image[0, -3:, -3:, -1]
lowercase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
lowercase_ = np.array(
[0.61_474_943, 0.6_073_539, 0.43_308_544, 0.5_928_269, 0.47_493_595, 0.46_755_973, 0.4_613_838, 0.45_368_797, 0.50_119_233])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def _UpperCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_img2img_frog.npy""")
lowercase_ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""")
lowercase_ = """A red cartoon frog, 4k"""
lowercase_ = KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa)
pipe_prior.to(lowerCAmelCase_)
lowercase_ = KandinskyImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1""" , torch_dtype=torch.floataa)
lowercase_ = pipeline.to(lowerCAmelCase_)
pipeline.set_progress_bar_config(disable=lowerCAmelCase_)
lowercase_ = torch.Generator(device="""cpu""").manual_seed(0)
lowercase_ , lowercase_ = pipe_prior(
lowerCAmelCase_ , generator=lowerCAmelCase_ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
lowercase_ = pipeline(
lowerCAmelCase_ , image=lowerCAmelCase_ , image_embeds=lowerCAmelCase_ , negative_image_embeds=lowerCAmelCase_ , generator=lowerCAmelCase_ , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , strength=0.2 , output_type="""np""" , )
lowercase_ = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(lowerCAmelCase_ , lowerCAmelCase_)
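# Note on the img2img call above: strength=0.2 re-runs only the tail of the
# diffusion schedule, keeping the output close to the init image, while values
# near 1.0 behave almost like pure text-to-image generation. This is standard
# diffusers img2img semantics, stated here as background rather than taken from
# this file.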
| 313
|
"""simple docstring"""
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float] , allow_empty_subarrays: bool = False ) -> float:
    '''simple docstring'''
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("""-inf""" )
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num , curr_sum + num )
        max_sum = max(max_sum , curr_sum )
    return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F"{max_subarray_sum(nums) = }")
| 313
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
UpperCAmelCase : str = "▁"
UpperCAmelCase : Dict = {"vocab_file": "sentencepiece.bpe.model"}
UpperCAmelCase : List[Any] = {
"vocab_file": {
"facebook/mbart-large-50-one-to-many-mmt": (
"https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
),
}
}
UpperCAmelCase : int = {
"facebook/mbart-large-50-one-to-many-mmt": 1024,
}
# fmt: off
UpperCAmelCase : Tuple = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
class SCREAMING_SNAKE_CASE__ ( PreTrainedTokenizer ):
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = ["input_ids", "attention_mask"]
lowercase__ = []
lowercase__ = []
def __init__( self : str , lowerCAmelCase_ : str , lowerCAmelCase_ : str=None , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : Union[str, Any]="</s>" , lowerCAmelCase_ : Any="</s>" , lowerCAmelCase_ : Optional[int]="<s>" , lowerCAmelCase_ : int="<unk>" , lowerCAmelCase_ : List[Any]="<pad>" , lowerCAmelCase_ : Optional[int]="<mask>" , lowerCAmelCase_ : Optional[Dict[str, Any]] = None , **lowerCAmelCase_ : Dict , ):
"""simple docstring"""
lowercase_ = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_) if isinstance(lowerCAmelCase_ , lowerCAmelCase_) else mask_token
lowercase_ = {} if sp_model_kwargs is None else sp_model_kwargs
lowercase_ = kwargs.get("""additional_special_tokens""" , [])
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=lowerCAmelCase_ , tgt_lang=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase_ , )
lowercase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(lowerCAmelCase_))
lowercase_ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
lowercase_ = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowercase_ = 1
lowercase_ = len(self.sp_model)
lowercase_ = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowerCAmelCase_)
}
lowercase_ = {v: k for k, v in self.lang_code_to_id.items()}
lowercase_ = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
lowercase_ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
lowercase_ = src_lang if src_lang is not None else """en_XX"""
lowercase_ = self.lang_code_to_id[self._src_lang]
lowercase_ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
@property
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def _UpperCAmelCase ( self : Tuple , lowerCAmelCase_ : str):
"""simple docstring"""
lowercase_ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def __getstate__( self : List[str]):
"""simple docstring"""
lowercase_ = self.__dict__.copy()
lowercase_ = None
return state
def __setstate__( self : List[Any] , lowerCAmelCase_ : Dict):
"""simple docstring"""
lowercase_ = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs"""):
lowercase_ = {}
lowercase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = {self.convert_ids_to_tokens(lowerCAmelCase_): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def _UpperCAmelCase ( self : int , lowerCAmelCase_ : str):
"""simple docstring"""
return self.sp_model.encode(lowerCAmelCase_ , out_type=lowerCAmelCase_)
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : str):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase_ = self.sp_model.PieceToId(lowerCAmelCase_)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _UpperCAmelCase ( self : List[Any] , lowerCAmelCase_ : int):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : Optional[Any]):
"""simple docstring"""
lowercase_ = []
lowercase_ = """"""
lowercase_ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCAmelCase_) + token
lowercase_ = True
lowercase_ = []
else:
current_sub_tokens.append(lowerCAmelCase_)
lowercase_ = False
out_string += self.sp_model.decode(lowerCAmelCase_)
return out_string.strip()
def _UpperCAmelCase ( self : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None):
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase_):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
return
lowercase_ = os.path.join(
lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase_) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , lowerCAmelCase_)
elif not os.path.isfile(self.vocab_file):
with open(lowerCAmelCase_ , """wb""") as fi:
lowercase_ = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase_)
return (out_vocab_file,)
def _UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None , lowerCAmelCase_ : bool = False):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase_ , token_ids_a=lowerCAmelCase_ , already_has_special_tokens=lowerCAmelCase_)
lowercase_ = [1] * len(self.prefix_tokens)
lowercase_ = [1] * len(self.suffix_tokens)
if token_ids_a is None:
return prefix_ones + ([0] * len(lowerCAmelCase_)) + suffix_ones
return prefix_ones + ([0] * len(lowerCAmelCase_)) + ([0] * len(lowerCAmelCase_)) + suffix_ones
def _UpperCAmelCase ( self : List[Any] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] , lowerCAmelCase_ : Optional[str] , **lowerCAmelCase_ : Union[str, Any]):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""")
lowercase_ = src_lang
lowercase_ = self(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_)
lowercase_ = self.convert_tokens_to_ids(lowerCAmelCase_)
lowercase_ = tgt_lang_id
return inputs
def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str = "en_XX" , lowerCAmelCase_ : Optional[List[str]] = None , lowerCAmelCase_ : str = "ro_RO" , **lowerCAmelCase_ : int , ):
"""simple docstring"""
lowercase_ = src_lang
lowercase_ = tgt_lang
return super().prepare_seqaseq_batch(lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_)
def _UpperCAmelCase ( self : str):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang)
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def _UpperCAmelCase ( self : Any , lowerCAmelCase_ : str):
"""simple docstring"""
lowercase_ = self.lang_code_to_id[src_lang]
lowercase_ = [self.cur_lang_code_id]
lowercase_ = [self.eos_token_id]
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : str):
"""simple docstring"""
lowercase_ = self.lang_code_to_id[tgt_lang]
lowercase_ = [self.cur_lang_code_id]
lowercase_ = [self.eos_token_id]
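# Usage sketch (hedged: this mirrors the public MBart-50 tokenizer API that the
# class above implements; the checkpoint name comes from the maps at the top of
# the file):
#   tokenizer = MBart50Tokenizer.from_pretrained(
#       "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO")
#   batch = tokenizer("UN Chief Says There Is No Military Solution in Syria")
#   # Per set_src_lang_special_tokens above: input_ids = [en_XX code] + tokens + [eos]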
| 313
|
"""simple docstring"""
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
UpperCAmelCase : Optional[int] = logging.getLogger(__name__)
class BertEncoderWithPabee( BertEncoder ):
def _UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Optional[Any]=None):
"""simple docstring"""
lowercase_ = self.layer[current_layer](lowerCAmelCase_ , lowerCAmelCase_ , head_mask[current_layer])
lowercase_ = layer_outputs[0]
return hidden_states
@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , BERT_START_DOCSTRING , )
class BertModelWithPabee( BertModel ):
def __init__( self : Any , lowerCAmelCase_ : Dict):
"""simple docstring"""
super().__init__(lowerCAmelCase_)
lowercase_ = BertEncoderWithPabee(lowerCAmelCase_)
self.init_weights()
lowercase_ = 0
lowercase_ = 0
lowercase_ = 0
lowercase_ = 0
def _UpperCAmelCase ( self : Dict , lowerCAmelCase_ : int):
"""simple docstring"""
lowercase_ = threshold
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : Optional[Any]):
"""simple docstring"""
lowercase_ = patience
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = 0
lowercase_ = 0
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ = self.inference_layers_num / self.inference_instances_num
lowercase_ = (
F'''*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ='''
F''' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***'''
)
print(lowerCAmelCase_)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
def _UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : str=None , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : int=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : List[Any]=False , ):
"""simple docstring"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""")
elif input_ids is not None:
lowercase_ = input_ids.size()
elif inputs_embeds is not None:
lowercase_ = inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""")
lowercase_ = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
lowercase_ = torch.ones(lowerCAmelCase_ , device=lowerCAmelCase_)
if token_type_ids is None:
lowercase_ = torch.zeros(lowerCAmelCase_ , dtype=torch.long , device=lowerCAmelCase_)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
lowercase_ = self.get_extended_attention_mask(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
lowercase_ , lowercase_ , lowercase_ = encoder_hidden_states.size()
lowercase_ = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
lowercase_ = torch.ones(lowerCAmelCase_ , device=lowerCAmelCase_)
lowercase_ = self.invert_attention_mask(lowerCAmelCase_)
else:
lowercase_ = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
lowercase_ = self.get_head_mask(lowerCAmelCase_ , self.config.num_hidden_layers)
lowercase_ = self.embeddings(
input_ids=lowerCAmelCase_ , position_ids=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , inputs_embeds=lowerCAmelCase_)
lowercase_ = embedding_output
if self.training:
lowercase_ = []
for i in range(self.config.num_hidden_layers):
lowercase_ = self.encoder.adaptive_forward(
lowerCAmelCase_ , current_layer=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , head_mask=lowerCAmelCase_)
lowercase_ = self.pooler(lowerCAmelCase_)
lowercase_ = output_layers[i](output_dropout(lowerCAmelCase_))
res.append(lowerCAmelCase_)
elif self.patience == 0: # Use all layers for inference
lowercase_ = self.encoder(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , head_mask=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=lowerCAmelCase_ , )
lowercase_ = self.pooler(encoder_outputs[0])
lowercase_ = [output_layers[self.config.num_hidden_layers - 1](lowerCAmelCase_)]
else:
lowercase_ = 0
lowercase_ = None
lowercase_ = 0
for i in range(self.config.num_hidden_layers):
calculated_layer_num += 1
lowercase_ = self.encoder.adaptive_forward(
lowerCAmelCase_ , current_layer=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , head_mask=lowerCAmelCase_)
lowercase_ = self.pooler(lowerCAmelCase_)
lowercase_ = output_layers[i](lowerCAmelCase_)
if regression:
lowercase_ = logits.detach()
if patient_result is not None:
lowercase_ = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels) < self.regression_threshold:
patient_counter += 1
else:
lowercase_ = 0
else:
lowercase_ = logits.detach().argmax(dim=1)
if patient_result is not None:
lowercase_ = patient_result.detach().argmax(dim=1)
if (patient_result is not None) and torch.all(labels.eq(lowerCAmelCase_)):
patient_counter += 1
else:
lowercase_ = 0
lowercase_ = logits
if patient_counter == self.patience:
break
lowercase_ = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
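# PABEE (Patience-based Early Exit) in a nutshell: every encoder layer feeds its pooled
# output into a per-layer classifier. At inference, once `patience` consecutive layers
# agree on the prediction (or, for regression, differ by less than `regression_threshold`),
# the loop above breaks early and the latest prediction is returned.
# Hypothetical usage sketch (names follow the methods defined above):
#
#     model.bert.set_patience(3)
#     model.bert.reset_stats()
#     outputs = model(input_ids=input_ids, attention_mask=attention_mask)
#     model.bert.log_stats()  # prints the average number of layers actually executed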
@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
| 313
| 1
|
"""simple docstring"""
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef


DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)
def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
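# Illustrative example: computing the MRPC metrics for a batch of predictions.
#
#     import numpy as np
#     preds = np.array([1, 0, 1, 1])
#     labels = np.array([1, 0, 0, 1])
#     glue_compute_metrics("mrpc", preds, labels)
#     # -> {"acc": 0.75, "f1": 0.8, "acc_and_f1": 0.775}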
| 313
|
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
    """Checks whether cp is the codepoint of a CJK character."""
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
        return True

    return False


def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        char = ord(char)
        if not _is_chinese_char(char):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids


def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
UpperCAmelCase : List[Any] = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
)
parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")
UpperCAmelCase : int = parser.parse_args()
main(args)
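# Illustrative invocation (flags match the argparse definitions above):
#
#     python prepare_chinese_ref.py --file_name ./resources/chinese-demo.txt \
#         --ltp ./resources/ltp --bert ./resources/robert --save_path ./resources/ref.txt
#
# The ref file lists, per input line, the positions of WordPiece tokens that continue a
# Chinese whole word; whole-word-masking pretraining scripts consume it so that whole
# words are masked together rather than subword by subword.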
| 313
| 1
|
"""simple docstring"""
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
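# Note: the forward pass mean-pools the token embeddings under the attention mask
# (padding positions contribute nothing) before linearly projecting the resulting
# sentence embedding into the dimensionality of the paired CLIP image encoder.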
| 313
|
"""simple docstring"""
from __future__ import annotations
def p_series(nth_term: int | float | str, power: int | float | str) -> list[str]:
    """
    Pure Python implementation of the P-Series algorithm.
    Returns the P-Series starting from the 1st term up to the nth term.

    >>> p_series(5, 2)
    ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']
    >>> p_series("", 1000)
    ['']
    >>> p_series(0, 0)
    []
    >>> p_series(1, 1)
    ['1']
    """
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series
if __name__ == "__main__":
import doctest
doctest.testmod()
    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
print(p_series(nth_term, power))
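# Mathematical aside: the p-series sum over n >= 1 of 1/n^p converges for p > 1 and
# diverges for p <= 1; the p = 1 case is the harmonic series.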
| 313
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
"processing_layoutlmv2": ["LayoutLMv2Processor"],
"tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv2"] = [
"LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
"LayoutLMv2ForQuestionAnswering",
"LayoutLMv2ForSequenceClassification",
"LayoutLMv2ForTokenClassification",
"LayoutLMv2Layer",
"LayoutLMv2Model",
"LayoutLMv2PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
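# _LazyModule defers importing the heavy torch/tokenizers/vision submodules until one of
# the names in _import_structure is first accessed, which keeps `import transformers`
# fast even when all optional backends are installed.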
| 313
|
"""simple docstring"""
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class Test(unittest.TestCase):
    def test_component(self):
        """
        test for method component()
        """
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()

    def test_str(self):
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self):
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self):
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self):
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self):
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self):
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self):
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self):
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test__mul__matrix(self):
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_change_component_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_component_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1), 0.01)

    def test__add__matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test__sub__matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self):
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n", str(square_zero_matrix(5))
        )
if __name__ == "__main__":
unittest.main()
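# Running this file executes every method whose name starts with `test_`; unittest
# discovers them automatically, so each Vector/Matrix operation is exercised in isolation.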
| 313
| 1
|
"""simple docstring"""
import csv
import tweepy
# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)

    # save most recent tweets
    alltweets.extend(new_tweets)

    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)

        # save most recent tweets
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1

        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("FirePing32")
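# Note: the Twitter v1.1 user_timeline endpoint returns at most 200 tweets per request
# and only exposes roughly the 3200 most recent tweets of an account, which is why the
# loop above pages backwards with max_id until an empty batch is returned.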
| 313
|
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        # move a random element into the pivot slot at the end of the range
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    "No of Comparisons for 100 elements selected from a standard normal distribution"
    " is :"
)
print(z)
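# Expected scale of the result: randomized quicksort performs about 2 * n * ln(n)
# comparisons on average, i.e. roughly 2 * 100 * ln(100) ≈ 921 for the 100 elements above.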
| 313
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_bert": ["BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BertConfig", "BertOnnxConfig"],
"tokenization_bert": ["BasicTokenizer", "BertTokenizer", "WordpieceTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_fast"] = ["BertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bert"] = [
"BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BertForMaskedLM",
"BertForMultipleChoice",
"BertForNextSentencePrediction",
"BertForPreTraining",
"BertForQuestionAnswering",
"BertForSequenceClassification",
"BertForTokenClassification",
"BertLayer",
"BertLMHeadModel",
"BertModel",
"BertPreTrainedModel",
"load_tf_weights_in_bert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_bert"] = [
"TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBertEmbeddings",
"TFBertForMaskedLM",
"TFBertForMultipleChoice",
"TFBertForNextSentencePrediction",
"TFBertForPreTraining",
"TFBertForQuestionAnswering",
"TFBertForSequenceClassification",
"TFBertForTokenClassification",
"TFBertLMHeadModel",
"TFBertMainLayer",
"TFBertModel",
"TFBertPreTrainedModel",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_tf"] = ["TFBertTokenizer"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_bert"] = [
"FlaxBertForCausalLM",
"FlaxBertForMaskedLM",
"FlaxBertForMultipleChoice",
"FlaxBertForNextSentencePrediction",
"FlaxBertForPreTraining",
"FlaxBertForQuestionAnswering",
"FlaxBertForSequenceClassification",
"FlaxBertForTokenClassification",
"FlaxBertModel",
"FlaxBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 313
|
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''module.blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''module.blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""module.cls_token""", """vit.embeddings.cls_token"""),
("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""module.pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""module.norm.weight""", """layernorm.weight"""),
("""module.norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
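# The MSN checkpoints store the attention projections as one fused `qkv` matrix of shape
# (3 * hidden_size, hidden_size); the slices above split it back into the separate
# query/key/value weights and biases that the Hugging Face ViT implementation expects.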
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    ignore_keys = [
"""module.fc.fc1.weight""",
"""module.fc.fc1.bias""",
"""module.fc.bn1.weight""",
"""module.fc.bn1.bias""",
"""module.fc.bn1.running_mean""",
"""module.fc.bn1.running_var""",
"""module.fc.bn1.num_batches_tracked""",
"""module.fc.fc2.weight""",
"""module.fc.fc2.bias""",
"""module.fc.bn2.weight""",
"""module.fc.bn2.bias""",
"""module.fc.bn2.running_mean""",
"""module.fc.bn2.running_var""",
"""module.fc.bn2.num_batches_tracked""",
"""module.fc.fc3.weight""",
"""module.fc.fc3.bias""",
]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCAmelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
UpperCAmelCase : Tuple = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
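# Illustrative invocation (the checkpoint URL is the argparse default above):
#
#     python convert_vit_msn_to_pytorch.py \
#         --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#         --pytorch_dump_folder_path ./vit-msn-small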
| 313
| 1
|
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    """Renames the fairseq MusicGen state dict to the Hugging Face convention and splits it
    into the decoder (LM) weights and the encoder-decoder projection weights."""
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")

    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
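# Illustrative invocation: convert the small MusicGen checkpoint and save it locally.
#
#     python convert_musicgen_transformers.py --checkpoint small \
#         --pytorch_dump_folder ./musicgen-small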
if __name__ == "__main__":
UpperCAmelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
UpperCAmelCase : Optional[Any] = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 313
|
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
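# In the Perceiver, d_model is the channel dimension of the raw input embeddings, while
# num_latents x d_latents defines the fixed-size latent array the model cross-attends
# those inputs into, keeping self-attention cost independent of the input length.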
class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
| 313
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}


class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor

        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 313
|
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
| 313
| 1
|
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
        """Return the kwargs used to instantiate the image processor under test."""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        """Return the (height, width) expected after shortest-edge resizing."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
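
# Worked example of the shortest-edge rule implemented in get_expected_values above:
# the shorter side is scaled to size["shortest_edge"] and the other side keeps the
# aspect ratio. This is a minimal sketch; the longest_edge cap is ignored here.
def _shortest_edge_resize(height, width, shortest_edge=18):
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge


# e.g. a 400x300 (h x w) image resized with shortest_edge=18 -> (24, 18)
assert _shortest_edge_resize(400, 300) == (24, 18)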
@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_equivalence_padding(self):
        # Initialize image_processings
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")

        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4)
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
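
# A minimal sketch of the batch padding checked in test_equivalence_padding above:
# each image is placed top-left in a zero canvas sized to the batch maximum.
# Assumption: this mirrors the DETR-style right/bottom zero padding; pixel-mask
# handling is omitted.
def _pad_to_batch_max(images):
    max_h = max(img.shape[1] for img in images)
    max_w = max(img.shape[2] for img in images)
    batch = torch.zeros(len(images), images[0].shape[0], max_h, max_w)
    for i, img in enumerate(images):
        batch[i, :, : img.shape[1], : img.shape[2]] = img
    return batch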
| 313
|
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        """Build a (config, inputs_dict) pair for the common model tests."""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates)
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        """Check that incremental decoding with a cache matches a single uncached pass."""
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        """Same as check_use_cache_forward, but with an explicit decoder attention mask."""
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None):
    """Build encoder/decoder attention masks from pad tokens when not provided."""
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
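
# Worked example for prepare_pegasus_inputs_dict above: the encoder mask is the
# complement of padding, and the decoder mask always exposes position 0 because
# Pegasus reuses the pad id as the decoder start token.
_example_ids = np.array([[5, 6, 7, 1, 1]])  # pad_token_id = 1
assert np.array_equal(np.not_equal(_example_ids, 1).astype(np.int8), np.array([[1, 1, 1, 0, 0]]))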
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
        src_text = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
        tgt_text = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
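
# A standalone sketch of the JIT-vs-eager pattern used in test_encode/test_decode
# above: the same traced function is run once compiled and once with jit disabled,
# and only output shapes are compared (values can differ by tiny numeric noise).
def _demo_jit_parity():
    @jax.jit
    def f(x):
        return jnp.tanh(x) * 2.0

    x = jnp.ones((2, 3))
    jitted = f(x)
    with jax.disable_jit():
        eager = f(x)
    assert jitted.shape == eager.shape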
| 313
| 1
|