code stringlengths 87 55.2k | code_codestyle int64 0 349 | style_context stringlengths 135 49.1k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
"""simple docstring"""
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ : Optional[int] = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    """Build a MaskFormerConfig (Swin-tiny backbone) for the given checkpoint name.

    The number of labels and the id2label mapping file are chosen from the
    dataset name embedded in ``model_name``.

    Raises:
        ValueError: if no known dataset name is found in ``model_name``.
    """
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    # NOTE: "ade20k-full" must be tested before "ade", and "coco-stuff" before "coco",
    # because the shorter names are substrings of the longer ones.
    if "ade20k-full" in model_name:
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"
    else:
        # previously an unknown name fell through to a NameError on `filename`
        raise ValueError(f"Could not infer dataset from model name: {model_name}")

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    # attach the mappings so they are serialized with the converted checkpoint
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    """Return (old_key, new_key) pairs mapping original MaskFormer parameter
    names onto the Hugging Face implementation's names.

    Covers the Swin backbone stem/stages, the FPN pixel decoder, the
    transformer decoder and the prediction heads. QKV projections are handled
    separately (they must be split, not just renamed).
    """
    rename_keys = []
    # fmt: off
    # stem
    rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight"))
    rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias"))
    rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight"))
    rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias"))
    # stages: each Swin block uses the same sub-module renames
    block_renames = [
        ("norm1.weight", "layernorm_before.weight"),
        ("norm1.bias", "layernorm_before.bias"),
        ("attn.relative_position_bias_table", "attention.self.relative_position_bias_table"),
        ("attn.relative_position_index", "attention.self.relative_position_index"),
        ("attn.proj.weight", "attention.output.dense.weight"),
        ("attn.proj.bias", "attention.output.dense.bias"),
        ("norm2.weight", "layernorm_after.weight"),
        ("norm2.bias", "layernorm_after.bias"),
        ("mlp.fc1.weight", "intermediate.dense.weight"),
        ("mlp.fc1.bias", "intermediate.dense.bias"),
        ("mlp.fc2.weight", "output.dense.weight"),
        ("mlp.fc2.bias", "output.dense.bias"),
    ]
    swin_prefix = "model.pixel_level_module.encoder.model.encoder.layers"
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            for src, dest in block_renames:
                rename_keys.append((f"backbone.layers.{i}.blocks.{j}.{src}", f"{swin_prefix}.{i}.blocks.{j}.{dest}"))
        if i < 3:
            # the last stage has no downsampling layer
            for suffix in ("reduction.weight", "norm.weight", "norm.bias"):
                rename_keys.append((f"backbone.layers.{i}.downsample.{suffix}", f"{swin_prefix}.{i}.downsample.{suffix}"))
        rename_keys.append((f"backbone.norm{i}.weight", f"model.pixel_level_module.encoder.hidden_states_norms.{i}.weight"))
        rename_keys.append((f"backbone.norm{i}.bias", f"model.pixel_level_module.encoder.hidden_states_norms.{i}.bias"))
    # FPN
    rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight"))
    rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight"))
    rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias"))
    # original FPN layers 3,2,1 map onto HF FPN layers 0,1,2
    for source_index, target_index in zip(range(3, 0, -1), range(0, 3)):
        for src_mod, dest_mod in (("adapter", "proj"), ("layer", "block")):
            rename_keys.append((f"sem_seg_head.{src_mod}_{source_index}.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.{dest_mod}.0.weight"))
            rename_keys.append((f"sem_seg_head.{src_mod}_{source_index}.norm.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.{dest_mod}.1.weight"))
            rename_keys.append((f"sem_seg_head.{src_mod}_{source_index}.norm.bias", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.{dest_mod}.1.bias"))
    rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight"))
    rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias"))
    # Transformer decoder: per-layer sub-module renames (weight + bias each)
    decoder_renames = [
        ("self_attn.out_proj", "self_attn.out_proj"),        # self-attention out projection
        ("multihead_attn.out_proj", "encoder_attn.out_proj"),  # cross-attention out projection
        ("linear1", "fc1"),                                   # MLP 1
        ("linear2", "fc2"),                                   # MLP 2
        ("norm1", "self_attn_layer_norm"),                    # self-attention layernorm
        ("norm2", "encoder_attn_layer_norm"),                 # cross-attention layernorm
        ("norm3", "final_layer_norm"),                        # final layernorm
    ]
    for idx in range(config.decoder_config.decoder_layers):
        for src, dest in decoder_renames:
            for param in ("weight", "bias"):
                rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.{src}.{param}", f"model.transformer_module.decoder.layers.{idx}.{dest}.{param}"))
    rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight"))
    rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias"))
    # heads on top
    rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight"))
    rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight"))
    rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias"))
    rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight"))
    rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias"))
    for i in range(3):
        rename_keys.append((f"sem_seg_head.predictor.mask_embed.layers.{i}.weight", f"mask_embedder.{i}.0.weight"))
        rename_keys.append((f"sem_seg_head.predictor.mask_embed.layers.{i}.bias", f"mask_embedder.{i}.0.bias"))
    # fmt: on
    return rename_keys
def rename_key(dct: dict, old: str, new: str) -> None:
    """Move ``dct[old]`` to ``dct[new]``, removing the old entry in place."""
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config) -> None:
    """Split the Swin backbone's fused qkv projections into separate q/k/v entries.

    In the original implementation every attention block stores one
    ``attn.qkv`` matrix + bias; the HF implementation expects separate
    query/key/value projections. Mutates ``state_dict`` in place.
    """
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config) -> None:
    """Split the transformer decoder's fused in_proj matrices into q/k/v projections.

    Handles both the self-attention (``self_attn``) and the cross-attention
    (``multihead_attn`` -> ``encoder_attn``) layers. Mutates ``state_dict``
    in place.
    """
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img():
    """Download and return the standard COCO cats test image used for verification."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """Convert an original MaskFormer pickle checkpoint into HF format.

    Loads the pickled state dict, renames/splits its keys to match
    ``MaskFormerForInstanceSegmentation``, verifies the logits against known
    values, and optionally saves and/or pushes the converted model.
    """
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        # NOTE(review): pickle.load on an untrusted checkpoint executes arbitrary
        # code — only run this on checkpoints you trust.
        data = pickle.load(f)
    state_dict = data["model"]

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    # strict=False: the backbone's final layernorm is unused by MaskFormer
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"

    # verify results on the standard test image
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")
    outputs = model(**inputs)

    print("Logits:", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="maskformer-swin-tiny-ade",
        type=str,
        # fixed: help was a 1-tuple ("...",) due to a stray trailing comma
        help="Name of the MaskFormer model you'd like to convert",
    )
    parser.add_argument(
        "--checkpoint_path",
        default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl",
        type=str,
        help="Path to the original state dict (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_maskformer_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
| 98 | """simple docstring"""
def binary_and(a: int, b: int) -> str:
    """Return the bitwise AND of two non-negative ints as a binary string.

    >>> binary_and(25, 32)
    '0b000000'
    >>> binary_and(37, 50)
    '0b100000'

    Raises:
        ValueError: if either input is negative.
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = bin(a)[2:]  # remove the leading "0b"
    b_binary = bin(b)[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))

    # zero-pad both operands to equal width, then AND digit by digit
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 98 | 1 |
"""simple docstring"""
import math
import os
import sys
def read_file_binary(file_path: str) -> str:
    """Read a file and return its content as a string of '0'/'1' bits.

    Exits the process with a message if the file cannot be read.
    """
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
    except OSError:
        print("File not accessible")
        sys.exit()
    # join once instead of quadratic string +=
    return "".join(f"{byte:08b}" for byte in data)
def add_key_to_lexicon(lexicon: dict, curr_string: str, index: int, last_match_id: str) -> None:
    """Replace `curr_string` in the lexicon with its two one-bit extensions.

    ``curr_string + "0"`` keeps the old code; ``curr_string + "1"`` gets the
    next code (``bin(index)``). When ``index`` reaches a power of two every
    existing code needs one more bit, so all values are zero-prefixed.
    Mutates ``lexicon`` in place.
    """
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id

    # was math.loga (mangled); log2 detects the code-width growth point
    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]

    lexicon[curr_string + "1"] = bin(index)[2:]
def compress_data(data_bits: str) -> str:
    """Compress a string of bits with an LZ-style adaptive dictionary.

    The lexicon starts with the two single bits; each emitted code replaces
    its key with two one-bit-longer keys (see ``add_key_to_lexicon``). A
    trailing partial match is zero-extended until it hits a lexicon entry.
    """
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for bit in data_bits:
        curr_string += bit
        if curr_string not in lexicon:
            # keep extending until the accumulated bits match a lexicon key
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""

    # flush a leftover partial match by zero-padding it to a known key
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"

    if curr_string != "":
        result += lexicon[curr_string]

    return result
def add_file_length(source_path: str, compressed: str) -> str:
    """Prefix the compressed bit string with the source file's size.

    The size is encoded as its binary form preceded by (bit-length - 1)
    zeros, which lets the decoder find where the length field ends.
    """
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)

    return "0" * (length_length - 1) + file_length_binary + compressed
def write_file_binary(file_path: str, to_write: str) -> None:
    """Pack a bit string into bytes and write it to `file_path`.

    The last byte is padded with a single '1' followed by zeros (a full
    "10000000" marker byte is appended when the data ends on a byte
    boundary), so the padding can be stripped on decompression.
    Exits the process with a message if the file cannot be written.
    """
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length] for i in range(0, len(to_write), byte_length)
            ]

            # empty input previously raised IndexError on result_byte_array[-1]
            if not result_byte_array or len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()
def compress(source_path: str, destination_path: str) -> None:
    """Compress `source_path` into `destination_path`.

    Pipeline: read bits -> LZ-compress -> prepend source length -> pack bytes.
    """
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
| 98 | """simple docstring"""
import requests
from bsa import BeautifulSoup
def get_citation(base_url: str, params: dict) -> str:
    """Scrape Google Scholar and return the "Cited by N" text for a paper.

    NOTE(review): the top-of-file import reads ``from bsa import BeautifulSoup``;
    it should be ``from bs4 import BeautifulSoup`` — confirm and fix the import.
    Scraping depends on Scholar's current HTML (``gs_ri``/``gs_fl`` classes)
    and may break without notice.
    """
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    # the third anchor in the gs_fl footer is the "Cited by N" link
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 98 | 1 |
"""simple docstring"""
import os
import sys
import unittest
# Make the repo's `utils` directory importable so get_test_info can be loaded.
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import get_test_info  # noqa: E402
from get_test_info import (  # noqa: E402
    get_model_to_test_mapping,
    get_model_to_tester_mapping,
    get_test_to_tester_mapping,
)

BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")


class GetTestInfoTester(unittest.TestCase):
    """Checks the test/model/tester mapping extraction in utils/get_test_info.py."""

    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}
        EXPECTED_BLIP_MAPPING = {
            "BlipModelTest": "BlipModelTester",
            "BlipTextImageModelTest": "BlipTextImageModelsModelTester",
            "BlipTextModelTest": "BlipTextModelTester",
            "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
            "BlipVQAModelTest": "BlipVQAModelTester",
            "BlipVisionModelTest": "BlipVisionModelTester",
        }

        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTest"],
            "BertForMultipleChoice": ["BertModelTest"],
            "BertForNextSentencePrediction": ["BertModelTest"],
            "BertForPreTraining": ["BertModelTest"],
            "BertForQuestionAnswering": ["BertModelTest"],
            "BertForSequenceClassification": ["BertModelTest"],
            "BertForTokenClassification": ["BertModelTest"],
            "BertLMHeadModel": ["BertModelTest"],
            "BertModel": ["BertModelTest"],
        }
        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelTest"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
            "BlipForQuestionAnswering": ["BlipVQAModelTest"],
            "BlipModel": ["BlipModelTest"],
            "BlipTextModel": ["BlipTextModelTest"],
            "BlipVisionModel": ["BlipVisionModelTest"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTester"],
            "BertForMultipleChoice": ["BertModelTester"],
            "BertForNextSentencePrediction": ["BertModelTester"],
            "BertForPreTraining": ["BertModelTester"],
            "BertForQuestionAnswering": ["BertModelTester"],
            "BertForSequenceClassification": ["BertModelTester"],
            "BertForTokenClassification": ["BertModelTester"],
            "BertLMHeadModel": ["BertModelTester"],
            "BertModel": ["BertModelTester"],
        }
        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
            "BlipForQuestionAnswering": ["BlipVQAModelTester"],
            "BlipModel": ["BlipModelTester"],
            "BlipTextModel": ["BlipTextModelTester"],
            "BlipVisionModel": ["BlipVisionModelTester"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
| 98 | """simple docstring"""
def is_palindrome(n: int) -> bool:
    """Return True if the decimal digits of `n` read the same both ways."""
    return str(n) == str(n)[::-1]
def sum_reverse(n: int) -> int:
    """Return `n` plus its decimal-digit reversal (the reverse-and-add step)."""
    return int(n) + int(str(n)[::-1])
def solution(limit: int = 10000) -> int:
    """Count Lychrel candidates below `limit` (Project Euler 55).

    A number is assumed Lychrel if 50 reverse-and-add iterations never
    produce a palindrome.
    """
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        current = num
        while iterations < 50:
            current = sum_reverse(current)
            iterations += 1
            if is_palindrome(current):
                break
        else:
            # while-else: no palindrome found within 50 iterations
            lychrel_nums.append(num)
    return len(lychrel_nums)


if __name__ == "__main__":
    print(f"{solution() = }")
| 98 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
# Lazy-import structure: submodules are only loaded when first accessed.
_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

else:
    import sys

    # replace this module with a lazy proxy that resolves attributes on demand
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 98 | """simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure: submodules are only loaded when first accessed.
_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel

else:
    import sys

    # replace this module with a lazy proxy that resolves attributes on demand
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 98 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Return True if `number` is prime (6k±1 trial division)."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # all remaining primes are of the form 6k +/- 1
    # math.isqrt avoids float rounding at large values (was math.sqrt)
    for i in range(5, math.isqrt(number) + 1, 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def list_truncated_nums(n: int) -> list[int]:
    """Return `n` together with all its left- and right-truncations.

    e.g. 997 -> [997, 97, 99, 7, 9]
    """
    str_num = str(n)
    list_nums = [n]
    # was range(1, len(n)): len() of an int raises TypeError
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))   # drop i leading digits
        list_nums.append(int(str_num[:-i]))  # drop i trailing digits
    return list_nums
def validate(n: int) -> bool:
    """Cheap pre-filter: for numbers over 3 digits, both 3-digit ends must be prime."""
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True
def compute_truncated_primes(count: int = 11) -> list[int]:
    """Return the first `count` truncatable primes (Project Euler 37).

    A truncatable prime stays prime when digits are repeatedly removed from
    either end; 2, 3, 5 and 7 are excluded by starting the search at 13.
    """
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2  # even numbers can never qualify
    return list_truncated_primes
def solution() -> int:
    """Return the sum of the only eleven truncatable primes (Project Euler 37)."""
    return sum(compute_truncated_primes(11))


if __name__ == "__main__":
    print(f"{sum(compute_truncated_primes(11)) = }")
| 98 | """simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
    from PIL import Image
else:
    class Image:
        """Stub so references to Image.open don't fail when PIL is absent."""

        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image) -> str:
    """Return the first 10 hex chars of the MD5 of the image's raw bytes.

    Used only to shorten test fixtures; MD5 is fine here (not security).
    """
    # was hashlib.mda (mangled): the correct constructor is md5
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]
def mask_to_test_readable(mask) -> dict:
    """Summarize a mask image as a short hash plus its array shape."""
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    # model mappings guarded against being None when the backend is unavailable
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
def __lowerCAmelCase ( self : List[Any] ,lowerCamelCase__ : Dict ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : str ):
UpperCAmelCase__ = MaskGenerationPipeline(model=lowerCamelCase__ ,image_processor=lowerCamelCase__ )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def __lowerCAmelCase ( self : Union[str, Any] ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : str ):
pass
@require_tf
@unittest.skip('Image segmentation not implemented in TF' )
def __lowerCAmelCase ( self : Optional[Any] ):
pass
@slow
@require_torch
def __lowerCAmelCase ( self : List[str] ):
UpperCAmelCase__ = pipeline('mask-generation' ,model='facebook/sam-vit-huge' )
UpperCAmelCase__ = image_segmenter('http://images.cocodataset.org/val2017/000000039769.jpg' ,points_per_batch=256 )
# Shortening by hashing
UpperCAmelCase__ = []
for i, o in enumerate(outputs['masks'] ):
new_outupt += [{"mask": mask_to_test_readable(lowerCamelCase__ ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(lowerCamelCase__ ,decimals=4 ) ,[
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_4_4_4},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_2_1},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_1_6_7},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_1_3_2},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_0_5_3},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.9_9_6_7},
{'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.9_9_3},
{'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.9_9_0_9},
{'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.9_8_7_9},
{'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.9_8_3_4},
{'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.9_7_1_6},
{'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.9_6_1_2},
{'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.9_5_9_9},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.9_5_5_2},
{'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.9_5_3_2},
{'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.9_5_1_6},
{'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.9_4_9_9},
{'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.9_4_8_3},
{'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.9_4_6_4},
{'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.9_4_3},
{'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.9_4_3},
{'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.9_4_0_8},
{'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.9_3_3_5},
{'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.9_3_2_6},
{'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.9_2_6_2},
{'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.8_9_9_9},
{'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.8_9_8_6},
{'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.8_9_8_4},
{'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.8_8_7_3},
{'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.8_8_7_1}
] ,)
# fmt: on
@require_torch
@slow
def __lowerCAmelCase ( self : Optional[Any] ):
UpperCAmelCase__ = 'facebook/sam-vit-huge'
UpperCAmelCase__ = pipeline('mask-generation' ,model=lowerCamelCase__ )
UpperCAmelCase__ = image_segmenter(
'http://images.cocodataset.org/val2017/000000039769.jpg' ,pred_iou_thresh=1 ,points_per_batch=256 )
# Shortening by hashing
UpperCAmelCase__ = []
for i, o in enumerate(outputs['masks'] ):
new_outupt += [{"mask": mask_to_test_readable(lowerCamelCase__ ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(lowerCamelCase__ ,decimals=4 ) ,[
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_4_4_4},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_2_1_0},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_1_6_7},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_1_3_2},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_0_5_3},
] ,)
| 98 | 1 |
"""simple docstring"""
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Bug fix: both module-level constants were bound to throwaway names, leaving
# `logger` (used by the config classes below) undefined.
logger = logging.get_logger(__name__)

# Canonical checkpoint -> config-file URL map for ALIGN.
ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'kakaobrain/align-base': 'https://huggingface.co/kakaobrain/align-base/resolve/main/config.json',
}
class snake_case ( __UpperCAmelCase ):
    """Configuration for the ALIGN text tower (BERT-style encoder)."""

    # Bug fix: was bound to `snake_case__`, but `from_pretrained` below reads
    # `cls.model_type`.
    model_type = "align_text_model"

    def __init__( self ,vocab_size=30_522 ,hidden_size=768 ,num_hidden_layers=12 ,num_attention_heads=12 ,intermediate_size=3_072 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=512 ,type_vocab_size=2 ,initializer_range=0.0_2 ,layer_norm_eps=1e-12 ,pad_token_id=0 ,position_embedding_type="absolute" ,use_cache=True ,**kwargs ):
        # Bug fix: all parameters previously shared the name `lowerCamelCase__`
        # (a SyntaxError) and every value was dropped into a throwaway local
        # instead of being stored on `self`.
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id

    @classmethod
    def from_pretrained( cls ,pretrained_model_name_or_path ,**kwargs ):
        """Load this text config, unwrapping the `text_config` of a composite
        `align` config when given one."""
        cls._set_token_in_kwargs(kwargs )
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path ,**kwargs )
        # get the text config dict if we are loading from AlignConfig
        if config_dict.get('model_type' ) == "align":
            config_dict = config_dict['text_config']
        if "model_type" in config_dict and hasattr(cls ,'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict ,**kwargs )
class snake_case ( __UpperCAmelCase ):
    """Configuration for the ALIGN vision tower (EfficientNet-style)."""

    model_type = "align_vision_model"

    def __init__( self ,num_channels=3 ,image_size=600 ,width_coefficient=2.0 ,depth_coefficient=3.1 ,depth_divisor=8 ,kernel_sizes=[3, 3, 5, 3, 5, 5, 3] ,in_channels=[32, 16, 24, 40, 80, 112, 192] ,out_channels=[16, 24, 40, 80, 112, 192, 320] ,depthwise_padding=[] ,strides=[1, 2, 2, 2, 1, 2, 1] ,num_block_repeats=[1, 2, 2, 3, 3, 4, 1] ,expand_ratios=[1, 6, 6, 6, 6, 6, 6] ,squeeze_expansion_ratio=0.2_5 ,hidden_act="swish" ,hidden_dim=2_560 ,pooling_type="mean" ,initializer_range=0.0_2 ,batch_norm_eps=0.0_0_1 ,batch_norm_momentum=0.9_9 ,drop_connect_rate=0.2 ,**kwargs ):
        # Bug fix: every parameter shared one name (a SyntaxError) and nothing
        # was stored on `self`.  NOTE(review): the list defaults mirror the
        # upstream API and are only read, never mutated, so sharing is benign.
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        # Each block repeat expands to 4 hidden layers.
        self.num_hidden_layers = sum(num_block_repeats ) * 4

    @classmethod
    def from_pretrained( cls ,pretrained_model_name_or_path ,**kwargs ):
        """Load this vision config, unwrapping the `vision_config` of a
        composite `align` config when given one."""
        cls._set_token_in_kwargs(kwargs )
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path ,**kwargs )
        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get('model_type' ) == "align":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls ,'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict ,**kwargs )
class snake_case ( __UpperCAmelCase ):
    """Composite ALIGN configuration holding a text and a vision sub-config."""

    model_type = "align"
    # NOTE(review): the original bound a bare `True` to `snake_case__`;
    # upstream composite configs expose this as `is_composition` — confirm.
    is_composition = True

    def __init__( self ,text_config=None ,vision_config=None ,projection_dim=640 ,temperature_init_value=1.0 ,initializer_range=0.0_2 ,**kwargs ,):
        # Bug fix: parameters shared one name (a SyntaxError); the defaulted
        # sub-config dicts and all attributes landed in throwaway locals.
        super().__init__(**kwargs )
        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the AlignTextConfig with default values.' )
        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. Initializing the AlignVisionConfig with default values.' )
        # NOTE(review): upstream these are AlignTextConfig / AlignVisionConfig;
        # the class names in this scrambled file differ — confirm resolution.
        self.text_config = AlignTextConfig(**text_config )
        self.vision_config = AlignVisionConfig(**vision_config )
        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs( cls ,text_config ,vision_config ,**kwargs ):
        """Build an ALIGN config from already-instantiated sub-configs."""
        return cls(text_config=text_config.to_dict() ,vision_config=vision_config.to_dict() ,**kwargs )

    def to_dict( self ):
        """Serialize, expanding the nested sub-configs.

        Bug fix: the original rebound one throwaway local four times and
        effectively returned an undefined `output`.
        """
        output = copy.deepcopy(self.__dict__ )
        output['text_config'] = self.text_config.to_dict()
        output['vision_config'] = self.vision_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
| 98 | """simple docstring"""
import functools
def a_ ( worda , wordb ):
    """Return the Levenshtein (edit) distance between two strings.

    Bug fix: both parameters previously shared the name `lowerCamelCase`
    (a SyntaxError), as did the two indices of the inner helper; the real
    names are restored so the memoized recursion works.

    >>> a_("kitten", "sitting")
    3
    """
    len_worda = len(worda )
    len_wordb = len(wordb )

    @functools.cache
    def min_distance(indexa , indexb ) -> int:
        # if first word index is overflow - delete all from the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if second word index is overflow - delete all from the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb] )  # current letters not identical
        return min(
            1 + min_distance(indexa + 1 , indexb ) , 1 + min_distance(indexa , indexb + 1 ) , diff + min_distance(indexa + 1 , indexb + 1 ) , )

    return min_distance(0 , 0 )


if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 98 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure: maps submodule name -> public symbols it provides.
# Bug fix: the dict, the model list, and the final module hook were all bound
# to throwaway names, so `_import_structure` (read by _LazyModule below) was
# never defined.
_import_structure = {
    'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
    'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
    'processing_mctct': ['MCTCTProcessor'],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_mctct'] = [
        'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MCTCTForCTC',
        'MCTCTModel',
        'MCTCTPreTrainedModel',
    ]
if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98 | """simple docstring"""
from PIL import Image
def a_ ( img , level ):
    """Return a copy of `img` with contrast adjusted by `level` (-255..255).

    Bug fix: both parameters shared one name (a SyntaxError), the factor was
    discarded into a throwaway local, and `img.point` was handed the image
    itself instead of the per-pixel `contrast` function.
    """
    factor = (2_5_9 * (level + 2_5_5)) / (2_5_5 * (2_5_9 - level))

    def contrast(c ) -> int:
        # Standard contrast curve anchored at mid-gray (128).
        return int(1_2_8 + factor * (c - 1_2_8) )

    return img.point(contrast )
if __name__ == "__main__":
    # Load image
    with Image.open('image_data/lena.jpg') as img:
        # Change contrast to 170.  Bug fix: the result went to a throwaway
        # name while the save below read an undefined `cont_img`; the contrast
        # helper in this file is the function `a_` defined above.
        cont_img = a_(img, 170)
    cont_img.save('image_data/lena_high_contrast.png', format='png')
| 98 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure for Megatron-BERT.  Bug fix: the dict, the model list
# and the final module hook were bound to throwaway names, so
# `_import_structure` was undefined at the _LazyModule call.
_import_structure = {
    'configuration_megatron_bert': ['MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegatronBertConfig'],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_megatron_bert'] = [
        'MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MegatronBertForCausalLM',
        'MegatronBertForMaskedLM',
        'MegatronBertForMultipleChoice',
        'MegatronBertForNextSentencePrediction',
        'MegatronBertForPreTraining',
        'MegatronBertForQuestionAnswering',
        'MegatronBertForSequenceClassification',
        'MegatronBertForTokenClassification',
        'MegatronBertModel',
        'MegatronBertPreTrainedModel',
    ]
if TYPE_CHECKING:
    from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_megatron_bert import (
            MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegatronBertForCausalLM,
            MegatronBertForMaskedLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
            MegatronBertModel,
            MegatronBertPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98 | """simple docstring"""
import os
import sys
import unittest
# Bug fix: `git_repo_path` and the two test-file constants were bound to
# throwaway names, so `sys.path.append` and every test method below read
# undefined variables.
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))

import get_test_info  # noqa: E402
from get_test_info import (  # noqa: E402
    get_model_to_test_mapping,
    get_model_to_tester_mapping,
    get_test_to_tester_mapping,
)

BERT_TEST_FILE = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py')
BLIP_TEST_FILE = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py')


class snake_case ( unittest.TestCase ):
    """Sanity checks for utils/get_test_info.py over the BERT and BLIP test files."""

    def test_get_test_to_tester_mapping( self ):
        """test class -> tester class, per test file."""
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE )
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE )
        EXPECTED_BERT_MAPPING = {'BertModelTest': 'BertModelTester'}
        EXPECTED_BLIP_MAPPING = {
            'BlipModelTest': 'BlipModelTester',
            'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
            'BlipTextModelTest': 'BlipTextModelTester',
            'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
            'BlipVQAModelTest': 'BlipVQAModelTester',
            'BlipVisionModelTest': 'BlipVisionModelTester',
        }
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping ) ,EXPECTED_BERT_MAPPING )
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping ) ,EXPECTED_BLIP_MAPPING )

    def test_get_model_to_test_mapping( self ):
        """model class -> list of test classes exercising it."""
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE )
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE )
        EXPECTED_BERT_MAPPING = {
            'BertForMaskedLM': ['BertModelTest'],
            'BertForMultipleChoice': ['BertModelTest'],
            'BertForNextSentencePrediction': ['BertModelTest'],
            'BertForPreTraining': ['BertModelTest'],
            'BertForQuestionAnswering': ['BertModelTest'],
            'BertForSequenceClassification': ['BertModelTest'],
            'BertForTokenClassification': ['BertModelTest'],
            'BertLMHeadModel': ['BertModelTest'],
            'BertModel': ['BertModelTest'],
        }
        EXPECTED_BLIP_MAPPING = {
            'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
            'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
            'BlipForQuestionAnswering': ['BlipVQAModelTest'],
            'BlipModel': ['BlipModelTest'],
            'BlipTextModel': ['BlipTextModelTest'],
            'BlipVisionModel': ['BlipVisionModelTest'],
        }
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping ) ,EXPECTED_BERT_MAPPING )
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping ) ,EXPECTED_BLIP_MAPPING )

    def test_get_model_to_tester_mapping( self ):
        """model class -> list of tester classes driving it."""
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE )
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE )
        EXPECTED_BERT_MAPPING = {
            'BertForMaskedLM': ['BertModelTester'],
            'BertForMultipleChoice': ['BertModelTester'],
            'BertForNextSentencePrediction': ['BertModelTester'],
            'BertForPreTraining': ['BertModelTester'],
            'BertForQuestionAnswering': ['BertModelTester'],
            'BertForSequenceClassification': ['BertModelTester'],
            'BertForTokenClassification': ['BertModelTester'],
            'BertLMHeadModel': ['BertModelTester'],
            'BertModel': ['BertModelTester'],
        }
        EXPECTED_BLIP_MAPPING = {
            'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
            'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
            'BlipForQuestionAnswering': ['BlipVQAModelTester'],
            'BlipModel': ['BlipModelTester'],
            'BlipTextModel': ['BlipTextModelTester'],
            'BlipVisionModel': ['BlipVisionModelTester'],
        }
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping ) ,EXPECTED_BERT_MAPPING )
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping ) ,EXPECTED_BLIP_MAPPING )
| 98 | 1 |
"""simple docstring"""
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
# Module-wide logging: INFO level, message-only format (used by the PCA/LDA helpers below).
logging.basicConfig(level=logging.INFO, format='%(message)s')
def a_ ( lowerCamelCase ):
    """Return the input array as an explicit column vector of shape (size, 1)."""
    return np.reshape(lowerCamelCase , (lowerCamelCase.size, 1) )
def a_ ( features , labels , classes ):
    """Within-class scatter: mean per-class covariance of `features`.

    `features` is (n_features, n_samples); `labels[i]` is the integer class of
    sample i; `classes` is the number of distinct classes.
    Bug fix: the original signature repeated one parameter name three times
    (a SyntaxError) and every intermediate landed in one throwaway local.
    """
    covariance_sum = np.nan
    for i in range(classes ):
        data = features[:, labels == i]
        data_mean = data.mean(1 )
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean )
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data , centered_data.T )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data , centered_data.T )
    return covariance_sum / features.shape[1]
def a_ ( features , labels , classes ):
    """Between-class scatter matrix of `features` given integer `labels`.

    Bug fix: same obfuscation defects as the within-class helper — duplicate
    parameter names (a SyntaxError) and results discarded into throwaway locals.
    """
    general_data_mean = features.mean(1 )
    covariance_sum = np.nan
    for i in range(classes ):
        data = features[:, labels == i]
        device_data = data.shape[1]  # number of samples in class i
        data_mean = data.mean(1 )
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean ) - column_reshape(general_data_mean ) , (column_reshape(data_mean ) - column_reshape(general_data_mean )).T , )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean ) - column_reshape(general_data_mean ) , (column_reshape(data_mean ) - column_reshape(general_data_mean )).T , )
    return covariance_sum / features.shape[1]
def a_ ( features , dimensions ):
    """Project `features` (n_features, n_samples) onto its top `dimensions`
    principal components.

    Raises AssertionError when the dataset is empty.
    Bug fix: the two parameters shared one name (a SyntaxError), all
    intermediates were discarded, and `force=` was handed an undefined name.
    """
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1 )
        # Center the dataset
        centered_data = features - np.reshape(data_mean , (data_mean.size, 1) )
        covariance_matrix = np.dot(centered_data , centered_data.T ) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix )
        # Take all the columns in the reverse order (-1), and then takes only the first
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T , features )
        logging.info('Principal Component Analysis computed' )
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR , format='%(message)s' , force=True )
        logging.error('Dataset empty' )
        raise AssertionError
def a_ ( features , labels , classes , dimensions ):
    """Fisher LDA: project `features` onto `dimensions` discriminant directions.

    Requires `classes > dimensions`; raises AssertionError otherwise or when
    the dataset is empty.
    Bug fix: the four parameters all shared one name (a SyntaxError), the
    eigendecomposition results were discarded, and the guard read the bound
    method `features.any` (always truthy) instead of calling it.
    """
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features , labels , classes ) , covariance_within_classes(features , labels , classes ) , )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors )
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T , features )
        logging.info('Linear Discriminant Analysis computed' )
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR , format='%(message)s' , force=True )
        logging.error('Dataset empty' )
        raise AssertionError
def a_ ( ):
    """Self-test: LDA must raise AssertionError when dimensions >= classes.

    Bug fix: the dummy dataset landed in throwaway locals and
    `pytest.raises` / `linear_discriminant_analysis` were handed undefined
    names (the function takes no parameters).
    """
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
    labels = np.array([0, 0, 0, 1, 1] )
    classes = 2
    dimensions = 2
    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError ) as error_info:
        projected_data = linear_discriminant_analysis(
            features , labels , classes , dimensions )
        if isinstance(projected_data , np.ndarray ):
            raise AssertionError(
                'Did not raise AssertionError for dimensions > classes' )
    assert error_info.type is AssertionError
def a_ ( ):
    """Self-test: PCA output must mismatch this deliberately wrong expectation.

    Bug fix: the fixture arrays landed in throwaway locals and
    `pytest.raises` / `principal_component_analysis` read undefined names.
    """
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]] )
    with pytest.raises(AssertionError ) as error_info:
        output = principal_component_analysis(features , dimensions )
        if not np.allclose(expected_output , output ):
            raise AssertionError
    assert error_info.type is AssertionError
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 98 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import structure for RoCBert.  Bug fix: the dict, the model list and
# the final module hook were bound to throwaway names, so `_import_structure`
# was undefined at the _LazyModule call.
_import_structure = {
    'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
    'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): RoCBert ships no fast tokenizer; upstream keeps this branch
    # as a placeholder.
    pass
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_roc_bert'] = [
        'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'RoCBertForCausalLM',
        'RoCBertForMaskedLM',
        'RoCBertForMultipleChoice',
        'RoCBertForPreTraining',
        'RoCBertForQuestionAnswering',
        'RoCBertForSequenceClassification',
        'RoCBertForTokenClassification',
        'RoCBertLayer',
        'RoCBertModel',
        'RoCBertPreTrainedModel',
        'load_tf_weights_in_roc_bert',
    ]
if TYPE_CHECKING:
    from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
    from .tokenization_roc_bert import RoCBertTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        # NOTE(review): placeholder kept from upstream — there is no fast
        # tokenizer module to import (TYPE_CHECKING only, never executes at runtime).
        raise OptionalDependencyNotAvailable()
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roc_bert import (
            ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoCBertForCausalLM,
            RoCBertForMaskedLM,
            RoCBertForMultipleChoice,
            RoCBertForPreTraining,
            RoCBertForQuestionAnswering,
            RoCBertForSequenceClassification,
            RoCBertForTokenClassification,
            RoCBertLayer,
            RoCBertModel,
            RoCBertPreTrainedModel,
            load_tf_weights_in_roc_bert,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
    from datasets import load_dataset


class snake_case ( __UpperCAmelCase ):
    """Text-to-speech tool built on SpeechT5 plus the HiFi-GAN vocoder.

    Bug fix: all class attributes were bound to the single name `snake_case__`
    (each overwriting the last), the four methods all shared one name, method
    parameters were duplicated (a SyntaxError), and nothing was ever stored on
    `self`.  The PipelineTool attribute/method names the base class reads are
    restored.
    """

    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechTaProcessor
    model_class = SpeechTaForTextToSpeech
    post_processor_class = SpeechTaHifiGan
    inputs = ["text"]
    outputs = ["audio"]

    def setup( self ):
        # Default the vocoder checkpoint before the base class materializes it.
        if self.post_processor is None:
            self.post_processor = 'microsoft/speecht5_hifigan'
        super().setup()

    def encode( self ,text ,speaker_embeddings=None ):
        """Tokenize `text`; fall back to a stock x-vector if no speaker is given."""
        inputs = self.pre_processor(text=text ,return_tensors='pt' ,truncation=True )
        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError('Datasets needs to be installed if not passing speaker embeddings.' )
            embeddings_dataset = load_dataset('Matthijs/cmu-arctic-xvectors' ,split='validation' )
            speaker_embeddings = torch.tensor(embeddings_dataset[7_305]['xvector'] ).unsqueeze(0 )
        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward( self ,inputs ):
        with torch.no_grad():
            return self.model.generate_speech(**inputs )

    def decode( self ,outputs ):
        with torch.no_grad():
            return self.post_processor(outputs ).cpu().detach()
| 98 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy-import structure for ViTMAE.  Bug fix: the dict, both model lists and
# the final module hook were bound to throwaway names, so `_import_structure`
# was undefined at the _LazyModule call.
_import_structure = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_vit_mae'] = [
        'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ViTMAEForPreTraining',
        'ViTMAELayer',
        'ViTMAEModel',
        'ViTMAEPreTrainedModel',
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_vit_mae'] = [
        'TFViTMAEForPreTraining',
        'TFViTMAEModel',
        'TFViTMAEPreTrainedModel',
    ]
if TYPE_CHECKING:
    from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_mae import (
            VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMAEForPreTraining,
            ViTMAELayer,
            ViTMAEModel,
            ViTMAEPreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98 | 1 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Bug fix: both module-level constants were bound to throwaway names, leaving
# `logger` (used by the config classes below) undefined.
logger = logging.get_logger(__name__)

# Canonical checkpoint -> config-file URL map for BLIP.
BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json',
    'Salesforce/blip-vqa-capfit-large': (
        'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'
    ),
    'Salesforce/blip-image-captioning-base': (
        'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'
    ),
    'Salesforce/blip-image-captioning-large': (
        'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'
    ),
    'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json',
    'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json',
    'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json',
    'Salesforce/blip-itm-large-flikr': (
        'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'
    ),
}
class snake_case ( __UpperCAmelCase ):
    """Configuration for the BLIP text model."""

    # Bug fix: was bound to `snake_case__`; `from_pretrained` reads `cls.model_type`.
    model_type = "blip_text_model"

    def __init__( self ,vocab_size=30_524 ,hidden_size=768 ,encoder_hidden_size=768 ,intermediate_size=3_072 ,projection_dim=768 ,num_hidden_layers=12 ,num_attention_heads=8 ,max_position_embeddings=512 ,hidden_act="gelu" ,layer_norm_eps=1e-12 ,hidden_dropout_prob=0.0 ,attention_probs_dropout_prob=0.0 ,initializer_range=0.0_2 ,bos_token_id=30_522 ,eos_token_id=2 ,pad_token_id=0 ,sep_token_id=102 ,is_decoder=True ,use_cache=True ,**kwargs ,):
        # Bug fix: all parameters shared one name (a SyntaxError); the special
        # token ids were passed as bare positionals and nothing was stored on `self`.
        super().__init__(
            pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,sep_token_id=sep_token_id ,**kwargs ,)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_hidden_size = encoder_hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.hidden_dropout_prob = hidden_dropout_prob
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.is_decoder = is_decoder
        self.use_cache = use_cache

    @classmethod
    def from_pretrained( cls ,pretrained_model_name_or_path ,**kwargs ):
        """Load this text config, unwrapping the `text_config` of a composite
        `blip` config when given one."""
        cls._set_token_in_kwargs(kwargs )
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path ,**kwargs )
        # get the text config dict if we are loading from BlipConfig
        if config_dict.get('model_type' ) == "blip":
            config_dict = config_dict['text_config']
        if "model_type" in config_dict and hasattr(cls ,'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict ,**kwargs )
class snake_case ( __UpperCAmelCase ):
    """Configuration for the BLIP vision model (ViT-style)."""

    model_type = "blip_vision_model"

    def __init__( self ,hidden_size=768 ,intermediate_size=3_072 ,projection_dim=512 ,num_hidden_layers=12 ,num_attention_heads=12 ,image_size=384 ,patch_size=16 ,hidden_act="gelu" ,layer_norm_eps=1e-5 ,attention_dropout=0.0 ,initializer_range=1e-10 ,**kwargs ,):
        # Bug fix: all parameters shared one name (a SyntaxError) and nothing
        # was stored on `self`.
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained( cls ,pretrained_model_name_or_path ,**kwargs ):
        """Load this vision config, unwrapping the `vision_config` of a
        composite `blip` config when given one."""
        cls._set_token_in_kwargs(kwargs )
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path ,**kwargs )
        # get the vision config dict if we are loading from BlipConfig
        if config_dict.get('model_type' ) == "blip":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls ,'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict ,**kwargs )
class snake_case ( __UpperCAmelCase ):
    """Composite BLIP configuration holding a text and a vision sub-config.

    Restored names: ``model_type`` is read via ``self.__class__.model_type`` in
    ``to_dict``; the ``__init__`` parameters and the locals of every method were
    bound to placeholder names while the code read the real ones.
    NOTE(review): ``BlipTextConfig`` / ``BlipVisionConfig`` are referenced as in
    the original source but are not defined under those names in this file —
    confirm they resolve at import time.
    """

    model_type = "blip"
    # presumably the HF "composition" flag for configs made of sub-configs
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        image_text_hidden_size=256,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if text_config is None:
            text_config = {}
            logger.info('`text_config` is `None`. Initializing the `BlipTextConfig` with default values.')
        if vision_config is None:
            vision_config = {}
            logger.info('`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.')
        self.text_config = BlipTextConfig(**text_config)
        self.vision_config = BlipVisionConfig(**vision_config)
        # the text model cross-attends into the vision hidden states
        self.text_config.encoder_hidden_size = self.vision_config.hidden_size
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
        self.image_text_hidden_size = image_text_hidden_size

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        """Build a composite config from already-constructed sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize, expanding the nested sub-configs into plain dicts."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 98 | """simple docstring"""
import os
import numpy
import onnx
def a_ ( a , b ):
    """Return True if two tensor protos are equal ignoring their ``name`` field.

    The names are blanked before comparing and restored afterwards, so the
    arguments are left unmodified. Restored: the original bound every step to a
    placeholder and then returned the undefined name ``res``; the parameter
    list also repeated one name (a SyntaxError).
    """
    name_a = a.name
    name_b = b.name
    # blank the names so equality compares only the payload fields
    a.name = ''
    b.name = ''
    res = a == b
    # restore the callers' names
    a.name = name_a
    b.name = name_b
    return res
def a_ ( node_proto , name , new_name ):
    """Replace every input of ``node_proto`` equal to ``name`` with ``new_name``,
    recursing into the subgraphs of If/Loop nodes.

    Restored: the original repeated one parameter name (a SyntaxError) while the
    body read ``node_proto``. NOTE(review): ``_graph_replace_input_with`` is not
    defined under that name in this file — confirm it resolves at call time.
    """
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            # insert-then-pop performs an in-place replace on the repeated field
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
def a_ ( graph_proto , name , new_name ):
    """Apply the node-level input rename to every node of a graph proto.

    Restored: duplicate placeholder parameter names (a SyntaxError).
    NOTE(review): ``_node_replace_input_with`` is not defined under that name in
    this file — confirm it resolves at call time.
    """
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def a_ ( model , model_without_ext , ind_to_replace ):
    """Drop duplicated initializers from ``model_without_ext`` and rewire all
    references to the surviving copy.

    ``ind_to_replace`` holds ``(i, ref_i)`` pairs where initializer ``i`` is a
    duplicate of the earlier ``ref_i``. Restored: the original repeated one
    parameter name (a SyntaxError) while the body read the names used here.
    """
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        # both models must agree on initializer layout, and the kept copy
        # must come before the dropped one
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def a_ ( lowerCamelCase ):
    """Deduplicate identical initializer tensors in the ONNX model at path
    ``lowerCamelCase`` and save the result as ``optimized_<name>`` next to it.

    Returns the path of the optimized model. Restored: every local was bound to
    a placeholder while the code read the real names (``inits``, ``dup_set``,
    ``dtype`` …). NOTE(review): ``_is_equal_tensor_proto`` and
    ``_remove_dup_initializers_from_model`` are not defined under those names in
    this file — confirm they resolve at call time.
    """
    model_file_folder = os.path.dirname(lowerCamelCase)
    model_file_name = os.path.basename(lowerCamelCase)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                # byte sizes per ONNX TensorProto.DataType: 1=FLOAT, 6=INT32,
                # 7=INT64, 11=DOUBLE
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 1_1:
                    mem_size *= 8
                else:
                    print('unexpected data type: ', dtype)
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))
    print('total reduced size: ', total_reduced_size / 1_0_2_4 / 1_0_2_4 / 1_0_2_4, 'GB')
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)
    optimized_model_file_name = 'optimized_' + model_file_name
    new_model_path = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model_path)
    return new_model_path
| 98 | 1 |
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
lowerCAmelCase__ : int = object()
# For specifying empty leaf dict `{}`
lowerCAmelCase__ : Any = object()
def a_ ( qs , ks ):
    """Return True if the regex sequence ``qs`` fully matches some contiguous
    window of the key tuple ``ks``.

    Each pattern is anchored with a trailing ``$`` so it must match a whole key
    component. Restored: the original repeated one parameter name (a
    SyntaxError) and compared a sequence against itself.
    """
    qts = tuple((re.compile(x + '$') for x in qs))
    # slide a len(qs)-wide window over ks
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False
def a_ ( lowerCamelCase ):
    """Build a ``replace(key, val)`` closure over the rule list ``lowerCamelCase``.

    The closure returns the replacement of the first rule whose pattern matches
    ``key``, falling back to ``val``. Restored: the inner function repeated one
    parameter name (a SyntaxError) while its body read ``rule``/``key``/``val``.
    """
    def replace(key, val):
        for rule, replacement in lowerCamelCase:
            if _match(rule, key):
                return replacement
        return val
    return replace
def a_ ( ):
    """Return the (pattern, PartitionSpec) rules for a GPT-style model.

    ``None`` entries mean "replicate". Restored: the original passed the
    undefined module-level name ``lowerCamelCase`` where ``None`` belongs.
    """
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P('mp', None)),
        (("transformer", "wte", "embedding"), P('mp', None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, 'mp')),
        (("attention", "out_proj", "kernel"), P('mp', None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, 'mp')),
        (("mlp", "c_fc", "bias"), P('mp')),
        (("mlp", "c_proj", "kernel"), P('mp', None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def a_ ( lowerCamelCase ):
    """Map every leaf of the (nested) parameter dict to a PartitionSpec.

    Raises AssertionError if any parameter matched no rule. Restored: the
    intermediate dicts were bound to placeholders while the code read
    ``initd``/``result``.
    """
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    # start every flattened key at the _unmatched sentinel
    initd = {k: _unmatched for k in flatten_dict(lowerCamelCase)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
| 98 | """simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class snake_case ( PipelineTesterMixin , unittest.TestCase ):
    """Fast (CPU, dummy-weight) tests for the Shap-E image-to-image pipeline.

    Restored: every method was named ``__lowerCAmelCase`` and every class
    attribute ``snake_case__`` (so later definitions clobbered earlier ones),
    and locals were bound to placeholders while the code read the real names.
    The base class was the undefined ``__UpperCAmelCase``; ``PipelineTesterMixin``
    is imported at the top of this file. Attribute names follow the mixin's
    contract (``self.pipeline_class`` and ``self.batch_params`` are read below).
    """

    pipeline_class = ShapEImgaImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=64,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=1,
        )
        return CLIPVisionModel(config)

    @property
    def dummy_image_processor(self):
        return CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            'num_attention_heads': 2,
            'attention_head_dim': 16,
            'embedding_dim': self.time_input_dim,
            'num_embeddings': 32,
            'embedding_proj_dim': self.text_embedder_hidden_size,
            'time_embed_dim': self.time_embed_dim,
            'num_layers': 1,
            'clip_embed_dim': self.time_input_dim * 2,
            'additional_embeddings': 0,
            'time_embed_act_fn': 'gelu',
            'norm_in_type': 'layer',
            'embedding_proj_norm_type': 'layer',
            'encoder_hid_proj_type': None,
            'added_emb_type': None,
        }
        return PriorTransformer(**model_kwargs)

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            'param_shapes': (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            'd_latent': self.time_input_dim,
            'd_hidden': self.renderer_dim,
            'n_output': 12,
            'background': (
                0.1,
                0.1,
                0.1,
            ),
        }
        return ShapERenderer(**model_kwargs)

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule='exp',
            num_train_timesteps=1_024,
            prediction_type='sample',
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            'prior': prior,
            'image_encoder': image_encoder,
            'image_processor': image_processor,
            'renderer': renderer,
            'scheduler': scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        # mps does not support device-bound generators
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'image': input_image,
            'generator': generator,
            'num_inference_steps': 1,
            'frame_size': 32,
            'output_type': 'np',
        }
        return inputs

    def test_shap_e_img2img(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ] )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == 'cpu'
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase ):
    """Slow GPU integration test against the released openai/shap-e-img2img weights.

    Restored: both methods were named ``__lowerCAmelCase`` (the second
    definition clobbered the first) and locals were bound to placeholders.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png' )
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/shap_e/test_shap_e_img2img_out.npy' )
        pipe = ShapEImgaImgPipeline.from_pretrained('openai/shap-e-img2img')
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            input_image,
            generator=generator,
            guidance_scale=3.0,
            num_inference_steps=64,
            frame_size=64,
            output_type='np',
        ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
| 98 | 1 |
"""simple docstring"""
from typing import List
from .keymap import KEYMAP, get_character
def a_ ( lowerCamelCase ):
    """Decorator: append the single key ``lowerCamelCase`` to the decorated
    function's ``handle_key`` list (creating it if absent).

    Restored: the inner decorator's parameter shadowed the key and the locals
    ``handle``/``key`` were never bound.
    """
    def decorator(func):
        handle = getattr(func, 'handle_key', [])
        handle += [lowerCamelCase]
        setattr(func, 'handle_key', handle)
        return func
    return decorator
def a_ ( *lowerCamelCase ):
    """Decorator: append all given keys to the decorated function's
    ``handle_key`` list (creating it if absent).

    Restored: the inner decorator's parameter shadowed the keys and the locals
    ``handle``/``keys`` were never bound.
    """
    def decorator(func):
        handle = getattr(func, 'handle_key', [])
        handle += lowerCamelCase
        setattr(func, 'handle_key', handle)
        return func
    return decorator
class snake_case ( type ):
    """Metaclass that collects ``handle_key``-marked attributes of a class into
    a ``key_handler`` dict and attaches a ``handle_input`` dispatcher.

    Restored: ``__new__`` repeated one parameter name (a SyntaxError), the base
    was the undefined ``__UpperCAmelCase`` (the ``super().__new__(cls, name,
    bases, attrs)`` call requires ``type``), and the dispatcher referenced the
    undefined ``KeyHandler``.
    """

    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, 'key_handler'):
            setattr(new_cls, 'key_handler', {})
        # cls is this metaclass; expose its static dispatcher on the new class
        setattr(new_cls, 'handle_input', cls.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, 'handle_key', [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Read one key and dispatch to the registered handler, if any."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None
def a_ ( cls ):
    """Rebuild ``cls`` through the key-handler metaclass defined above.

    Restored: the original called the undefined name ``KeyHandler``; in this
    file that metaclass is ``snake_case``.
    """
    return snake_case(cls.__name__, cls.__bases__, cls.__dict__.copy())
| 98 | """simple docstring"""
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ : Optional[int] = logging.get_logger(__name__)
class snake_case ( PreTrainedTokenizer ):
    """Byte-level (ByT5-style) tokenizer: UTF-8 bytes plus a handful of special
    tokens and sentinel ``<extra_id_*>`` tokens.

    Restored: every method was named ``__lowerCAmelCase`` (clobbering each
    other), ``__init__`` repeated parameter names (a SyntaxError), the class
    attribute read as ``model_input_names`` was misnamed, and the base class was
    the undefined ``__UpperCAmelCase`` (``PreTrainedTokenizer`` is imported at
    the top of this file).
    """

    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=125,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f'''<extra_id_{i}>''' for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool('extra_id' in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
                    ' provided to ByT5Tokenizer. In this case the additional_special_tokens must include the'
                    ' extra_ids tokens' )
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self._extra_ids = extra_ids
        self._utf_vocab_size = 2**8  # utf is 8 bits
        # define special tokens dict
        self.special_tokens_encoder = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder)
        # sentinels occupy the top of the vocab range
        n = len(additional_special_tokens)
        for i, token in enumerate(additional_special_tokens):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder = {v: k for k, v in self.special_tokens_encoder.items()}

    @property
    def vocab_size(self):
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a 0/1 mask marking the special (eos) positions."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def _add_eos_if_not_present(self, token_ids):
        """Append eos unless the sequence already ends with it."""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
                ' eos tokens being added.' )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """ByT5 uses no token types: return all zeros of the joined length."""
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def _tokenize(self, text):
        """One token per UTF-8 byte, represented as a 1-char string."""
        tokens = [chr(i) for i in text.encode('utf-8')]
        return tokens

    def _convert_token_to_id(self, token):
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token) != 1:
            token_id = self.unk_token_id
        else:
            # plain byte tokens are offset past the special-token ids
            token_id = ord(token) + self._num_special_tokens
        return token_id

    def _convert_id_to_token(self, index):
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens)
        return token

    def convert_tokens_to_string(self, tokens):
        """Reassemble byte tokens into a string, passing special tokens through."""
        bstring = b''
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode('utf-8')
            elif token in self.added_tokens_decoder:
                # NOTE(review): looks up special_tokens_decoder here as in the
                # original source — confirm this is intentional
                tok_string = self.special_tokens_decoder[token].encode('utf-8')
            elif token in self.special_tokens_encoder:
                tok_string = token.encode('utf-8')
            elif token in self.added_tokens_encoder:
                tok_string = token.encode('utf-8')
            else:
                tok_string = bytes([ord(token)])
            bstring += tok_string
        string = bstring.decode('utf-8', errors='ignore')
        return string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        # byte-level tokenizer: nothing to write
        return ()
| 98 | 1 |
"""simple docstring"""
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def a_ ( dataset , expected_features ):
    """Assert a text-loaded Dataset has the expected shape and feature dtypes.

    Restored: the original repeated one parameter name (a SyntaxError) and
    compared a value with itself in ``isinstance``.
    """
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory', [False, True])
def a_ ( keep_in_memory , text_path , tmp_path ):
    """Dataset loads from a text file both on-disk and in-memory.

    Restored: the original repeated one parameter name (a SyntaxError) while
    the body read ``keep_in_memory``/``text_path``/``tmp_path``.
    """
    cache_dir = tmp_path / 'cache'
    expected_features = {'text': 'string'}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    'features', [
        None,
        {'text': 'string'},
        {'text': 'int32'},
        {'text': 'float32'},
    ], )
def a_ ( features , text_path , tmp_path ):
    """Dataset honors an explicit ``features`` schema when given.

    Restored: duplicate parameter names (a SyntaxError) and ``Value`` being
    passed the whole mapping instead of the per-column ``dtype``.
    """
    cache_dir = tmp_path / 'cache'
    default_expected_features = {'text': 'string'}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize('split', [None, NamedSplit('train'), 'train', 'test'])
def a_ ( split , text_path , tmp_path ):
    """Dataset records the requested split, defaulting to "train".

    Restored: duplicate parameter names (a SyntaxError). Also fixed the final
    assertion's precedence bug: it previously parsed as
    ``(dataset.split == split) if split else "train"``, which is always truthy
    when ``split`` is None.
    """
    cache_dir = tmp_path / 'cache'
    expected_features = {'text': 'string'}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == (split if split else "train")
@pytest.mark.parametrize('path_type', [str, list])
def a_ ( path_type , text_path , tmp_path ):
    """Dataset accepts either a single path or a list of paths.

    Restored: duplicate parameter names (a SyntaxError) while the body branched
    on ``path_type`` and built ``path``.
    """
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / 'cache'
    expected_features = {'text': 'string'}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
def a_ ( dataset_dict , expected_features , splits=("train",) ):
    """Assert a text-loaded DatasetDict has the expected splits and schema.

    Restored: duplicate parameter names (a SyntaxError) and the default for
    ``splits`` attached to the wrong placeholder.
    """
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory', [False, True])
def a_ ( keep_in_memory , text_path , tmp_path ):
    """DatasetDict loads from a split mapping both on-disk and in-memory.

    Restored: duplicate parameter names (a SyntaxError).
    """
    cache_dir = tmp_path / 'cache'
    expected_features = {'text': 'string'}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({'train': text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    'features', [
        None,
        {'text': 'string'},
        {'text': 'int32'},
        {'text': 'float32'},
    ], )
def a_ ( features , text_path , tmp_path ):
    """DatasetDict honors an explicit ``features`` schema when given.

    Restored: duplicate parameter names (a SyntaxError) and ``Value`` being
    passed the whole mapping instead of the per-column ``dtype``.
    """
    cache_dir = tmp_path / 'cache'
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {'text': 'string'}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({'train': text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize('split', [None, NamedSplit('train'), 'train', 'test'])
def a_ ( split , text_path , tmp_path ):
    """DatasetDict keys follow the requested split(s).

    Restored: duplicate parameter names (a SyntaxError) and the ``path``/
    ``split`` locals lost to placeholders.
    """
    if split:
        path = {split: text_path}
    else:
        split = 'train'
        path = {'train': text_path, 'test': text_path}
    cache_dir = tmp_path / 'cache'
    expected_features = {'text': 'string'}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
| 98 | """simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class snake_case :
    """Binary-tree node for the coin-distribution problem.

    Restored: all three fields were named ``snake_case__`` (clobbering each
    other) while the algorithm below reads ``node.data``/``node.left``/
    ``node.right``.
    """

    # number of coins stored at this node
    data: int
    # child subtrees; None marks a missing child
    left: "snake_case | None" = None
    right: "snake_case | None" = None
# (moves, excess) pair returned by the recursive distribution helper.
# Restored: the code below references CoinsDistribResult, but the namedtuple
# was previously bound to a throwaway placeholder name.
CoinsDistribResult = namedtuple('CoinsDistribResult', 'moves excess')


def a_ ( lowerCamelCase ):
    """Minimum number of single-coin moves to leave exactly one coin per node.

    ``lowerCamelCase`` is the tree root; raises ValueError when the total
    number of coins differs from the number of nodes. Restored: the recursive
    helper's tuple unpacks were bound to placeholders while the arithmetic read
    the real names.
    """
    if lowerCamelCase is None:
        return 0

    # Validation
    def count_nodes(node) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(lowerCamelCase) != count_coins(lowerCamelCase):
        raise ValueError('The nodes number should be same as the number of coins')

    # Main calculation
    def get_distrib(node) -> CoinsDistribResult:
        if node is None:
            # an absent child contributes no moves and "holds" its one coin
            return CoinsDistribResult(0, 1)
        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)
        # coins that must flow across each child edge (sign = direction)
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        distrib_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(distrib_moves, distrib_excess)

    return get_distrib(lowerCamelCase)[0]
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 98 | 1 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
# Supported TorchDynamo backend names, in the order presented to the user.
# Restored: this list is indexed as DYNAMO_BACKENDS further down, but was
# previously bound to a throwaway placeholder name (with an undefined `Tuple`
# annotation evaluated at import time).
DYNAMO_BACKENDS = [
    'EAGER',
    'AOT_EAGER',
    'INDUCTOR',
    'NVFUSER',
    'AOT_NVFUSER',
    'AOT_CUDAGRAPHS',
    'OFI',
    'FX2TRT',
    'ONNXRT',
    'IPEX',
]
def a_ ( input_text , convert_value=None , default=None , error_message=None ):
    """Prompt on stdin until the reply converts successfully.

    Returns ``default`` on an empty reply (when a default is given), otherwise
    ``convert_value(reply)`` — or the raw reply when no converter is supplied.
    On conversion failure, prints ``error_message`` (if any) and re-prompts.
    Restored: the original repeated one parameter name (a SyntaxError).
    """
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)
def a_ ( input_text , options=None , convert_value=None , default_choice=0 ):
    """Show a bullet menu for ``options`` and return the (converted) choice.

    Restored: the original repeated one parameter name (a SyntaxError); the
    mutable ``[]`` default was also replaced with the None idiom (same
    semantics — the list is only read).
    """
    menu = BulletMenu(input_text, [] if options is None else options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result
def a_ ( lowerCamelCase ):
    """Map a menu index to a ComputeEnvironment member.

    Restored: the converted integer was bound to a placeholder while the index
    expression read the undefined name ``value``.
    """
    value = int(lowerCamelCase)
    return ComputeEnvironment(['LOCAL_MACHINE', 'AMAZON_SAGEMAKER'][value])
def a_ ( lowerCamelCase ):
    """Map a menu index to a DistributedType member (restored ``value`` local)."""
    value = int(lowerCamelCase)
    return DistributedType(['NO', 'MULTI_CPU', 'MULTI_XPU', 'MULTI_GPU', 'MULTI_NPU', 'TPU'][value])
def a_ ( lowerCamelCase ):
    """Map a menu index into DYNAMO_BACKENDS and return the enum's value
    (restored ``value`` local)."""
    value = int(lowerCamelCase)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value
def a_ ( lowerCamelCase ):
    """Map a menu index to a PrecisionType member (restored ``value`` local)."""
    value = int(lowerCamelCase)
    return PrecisionType(['no', 'fp16', 'bf16', 'fp8'][value])
def a_ ( lowerCamelCase ):
    """Map a menu index to a SageMakerDistributedType member (restored ``value`` local)."""
    value = int(lowerCamelCase)
    return SageMakerDistributedType(['NO', 'DATA_PARALLEL', 'MODEL_PARALLEL'][value])
def a_ ( lowerCamelCase ):
    """Convert a yes/no answer (case-insensitive) to a bool.

    Restored: the lookup key read the undefined name ``value`` instead of the
    parameter. Raises KeyError for anything other than yes/no.
    """
    return {"yes": True, "no": False}[lowerCamelCase.lower()]
class snake_case ( argparse.RawDescriptionHelpFormatter ):
    """Help formatter that hides the ``<command> [<args>]`` placeholder from
    usage strings.

    Restored: ``_format_usage`` repeated one parameter name (a SyntaxError)
    while the body read ``usage``; parameter names follow argparse's
    ``HelpFormatter._format_usage`` signature.
    """

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace('<command> [<args>] ', '')
        return usage
| 98 | """simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class snake_case ( __UpperCAmelCase ):
    """Test suite for ``PNDMScheduler`` (PRK + PLMS stepping).

    NOTE(review): local identifiers in this class look machine-mangled — values
    are bound to ``UpperCAmelCase__`` but later read under their intended names
    (``config``, ``scheduler``, ``output`` ...), which would raise NameError as
    written. Verify against the upstream diffusers test file.
    """
    # Scheduler class(es) exercised by the shared harness.
    snake_case__ = (PNDMScheduler,)
    # Default forward kwargs used by the check_over_* helpers.
    snake_case__ = (("num_inference_steps", 50),)
    # Build a base scheduler config; any kwarg overrides an entry.
    def __lowerCAmelCase ( self : List[str] ,**lowerCamelCase__ : str ):
        UpperCAmelCase__ = {
            'num_train_timesteps': 1_000,
            'beta_start': 0.0_0_0_1,
            'beta_end': 0.0_2,
            'beta_schedule': 'linear',
        }
        config.update(**lowerCamelCase__ )
        return config
    # Round-trip a scheduler through save_config/from_pretrained (with config
    # kwargs) and check step_prk/step_plms outputs match the original instance.
    def __lowerCAmelCase ( self : str ,lowerCamelCase__ : Optional[Any]=0 ,**lowerCamelCase__ : List[str] ):
        UpperCAmelCase__ = dict(self.forward_default_kwargs )
        UpperCAmelCase__ = kwargs.pop('num_inference_steps' ,lowerCamelCase__ )
        UpperCAmelCase__ = self.dummy_sample
        UpperCAmelCase__ = 0.1 * sample
        UpperCAmelCase__ = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
        for scheduler_class in self.scheduler_classes:
            UpperCAmelCase__ = self.get_scheduler_config(**lowerCamelCase__ )
            UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
            scheduler.set_timesteps(lowerCamelCase__ )
            # copy over dummy past residuals
            UpperCAmelCase__ = dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(lowerCamelCase__ )
                UpperCAmelCase__ = scheduler_class.from_pretrained(lowerCamelCase__ )
                new_scheduler.set_timesteps(lowerCamelCase__ )
                # copy over dummy past residuals
                UpperCAmelCase__ = dummy_past_residuals[:]
            UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
            UpperCAmelCase__ = new_scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
            UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
            UpperCAmelCase__ = new_scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    # NOTE(review): deliberately a no-op — presumably overriding a base-class
    # check that does not apply to this scheduler; confirm upstream.
    def __lowerCAmelCase ( self : Tuple ):
        pass
    # Same save/load round-trip as above but with the default config; past
    # residuals must be copied only after set_timesteps().
    def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : List[str]=0 ,**lowerCamelCase__ : Tuple ):
        UpperCAmelCase__ = dict(self.forward_default_kwargs )
        UpperCAmelCase__ = kwargs.pop('num_inference_steps' ,lowerCamelCase__ )
        UpperCAmelCase__ = self.dummy_sample
        UpperCAmelCase__ = 0.1 * sample
        UpperCAmelCase__ = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
        for scheduler_class in self.scheduler_classes:
            UpperCAmelCase__ = self.get_scheduler_config()
            UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
            scheduler.set_timesteps(lowerCamelCase__ )
            # copy over dummy past residuals (must be after setting timesteps)
            UpperCAmelCase__ = dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(lowerCamelCase__ )
                UpperCAmelCase__ = scheduler_class.from_pretrained(lowerCamelCase__ )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(lowerCamelCase__ )
                # copy over dummy past residual (must be after setting timesteps)
                UpperCAmelCase__ = dummy_past_residuals[:]
            UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
            UpperCAmelCase__ = new_scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
            UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
            UpperCAmelCase__ = new_scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    # Run a complete PRK + PLMS denoising loop and return the final sample.
    def __lowerCAmelCase ( self : List[Any] ,**lowerCamelCase__ : int ):
        UpperCAmelCase__ = self.scheduler_classes[0]
        UpperCAmelCase__ = self.get_scheduler_config(**lowerCamelCase__ )
        UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
        UpperCAmelCase__ = 10
        UpperCAmelCase__ = self.dummy_model()
        UpperCAmelCase__ = self.dummy_sample_deter
        scheduler.set_timesteps(lowerCamelCase__ )
        for i, t in enumerate(scheduler.prk_timesteps ):
            UpperCAmelCase__ = model(lowerCamelCase__ ,lowerCamelCase__ )
            UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ).prev_sample
        for i, t in enumerate(scheduler.plms_timesteps ):
            UpperCAmelCase__ = model(lowerCamelCase__ ,lowerCamelCase__ )
            UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ).prev_sample
        return sample
    # Shape checks: consecutive step_prk/step_plms outputs keep the sample shape.
    def __lowerCAmelCase ( self : int ):
        UpperCAmelCase__ = dict(self.forward_default_kwargs )
        UpperCAmelCase__ = kwargs.pop('num_inference_steps' ,lowerCamelCase__ )
        for scheduler_class in self.scheduler_classes:
            UpperCAmelCase__ = self.get_scheduler_config()
            UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
            UpperCAmelCase__ = self.dummy_sample
            UpperCAmelCase__ = 0.1 * sample
            if num_inference_steps is not None and hasattr(lowerCamelCase__ ,'set_timesteps' ):
                scheduler.set_timesteps(lowerCamelCase__ )
            elif num_inference_steps is not None and not hasattr(lowerCamelCase__ ,'set_timesteps' ):
                UpperCAmelCase__ = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            UpperCAmelCase__ = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
            UpperCAmelCase__ = dummy_past_residuals[:]
            UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,0 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
            UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,1 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
            self.assertEqual(output_a.shape ,sample.shape )
            self.assertEqual(output_a.shape ,output_a.shape )
            UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,0 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
            UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,1 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
            self.assertEqual(output_a.shape ,sample.shape )
            self.assertEqual(output_a.shape ,output_a.shape )
    # Config sweep: number of training timesteps.
    def __lowerCAmelCase ( self : List[Any] ):
        for timesteps in [100, 1_000]:
            self.check_over_configs(num_train_timesteps=lowerCamelCase__ )
    # Config sweep: steps_offset, plus an exact timestep-schedule check.
    def __lowerCAmelCase ( self : Optional[int] ):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=lowerCamelCase__ )
        UpperCAmelCase__ = self.scheduler_classes[0]
        UpperCAmelCase__ = self.get_scheduler_config(steps_offset=1 )
        UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
        scheduler.set_timesteps(10 )
        assert torch.equal(
            scheduler.timesteps ,torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) ,)
    # Config sweep: beta range endpoints.
    def __lowerCAmelCase ( self : Dict ):
        for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1] ,[0.0_0_2, 0.0_2] ):
            self.check_over_configs(beta_start=lowerCamelCase__ ,beta_end=lowerCamelCase__ )
    # Config sweep: beta schedules.
    def __lowerCAmelCase ( self : Union[str, Any] ):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=lowerCamelCase__ )
    # Config sweep: prediction types.
    def __lowerCAmelCase ( self : List[Any] ):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=lowerCamelCase__ )
    # Forward sweep: individual time steps.
    def __lowerCAmelCase ( self : Optional[Any] ):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=lowerCamelCase__ )
    # Forward sweep: inference-step counts.
    def __lowerCAmelCase ( self : List[Any] ):
        for t, num_inference_steps in zip([1, 5, 10] ,[10, 50, 100] ):
            self.check_over_forward(num_inference_steps=lowerCamelCase__ )
    def __lowerCAmelCase ( self : int ):
        # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
        UpperCAmelCase__ = 27
        for scheduler_class in self.scheduler_classes:
            UpperCAmelCase__ = self.dummy_sample
            UpperCAmelCase__ = 0.1 * sample
            UpperCAmelCase__ = self.get_scheduler_config()
            UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
            scheduler.set_timesteps(lowerCamelCase__ )
            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2] ):
                UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ).prev_sample
    # Calling step_plms before set_timesteps must raise.
    def __lowerCAmelCase ( self : int ):
        with self.assertRaises(lowerCamelCase__ ):
            UpperCAmelCase__ = self.scheduler_classes[0]
            UpperCAmelCase__ = self.get_scheduler_config()
            UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
            scheduler.step_plms(self.dummy_sample ,1 ,self.dummy_sample ).prev_sample
    # Numeric regression: full loop with default config.
    def __lowerCAmelCase ( self : Tuple ):
        UpperCAmelCase__ = self.full_loop()
        UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase__ ) )
        UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase__ ) )
        assert abs(result_sum.item() - 1_9_8.1_3_1_8 ) < 1e-2
        assert abs(result_mean.item() - 0.2_5_8_0 ) < 1e-3
    # Numeric regression: full loop with v-prediction.
    def __lowerCAmelCase ( self : Tuple ):
        UpperCAmelCase__ = self.full_loop(prediction_type='v_prediction' )
        UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase__ ) )
        UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase__ ) )
        assert abs(result_sum.item() - 6_7.3_9_8_6 ) < 1e-2
        assert abs(result_mean.item() - 0.0_8_7_8 ) < 1e-3
    def __lowerCAmelCase ( self : Union[str, Any] ):
        # We specify different beta, so that the first alpha is 0.99
        UpperCAmelCase__ = self.full_loop(set_alpha_to_one=lowerCamelCase__ ,beta_start=0.0_1 )
        UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase__ ) )
        UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase__ ) )
        assert abs(result_sum.item() - 2_3_0.0_3_9_9 ) < 1e-2
        assert abs(result_mean.item() - 0.2_9_9_5 ) < 1e-3
    def __lowerCAmelCase ( self : Tuple ):
        # We specify different beta, so that the first alpha is 0.99
        UpperCAmelCase__ = self.full_loop(set_alpha_to_one=lowerCamelCase__ ,beta_start=0.0_1 )
        UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase__ ) )
        UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase__ ) )
        assert abs(result_sum.item() - 1_8_6.9_4_8_2 ) < 1e-2
        assert abs(result_mean.item() - 0.2_4_3_4 ) < 1e-3
| 98 | 1 |
"""simple docstring"""
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
# Substring substitutions applied to every TF checkpoint key before lookup in
# the PyTorch state dict ("tf name" -> "hf name").
# NOTE(review): these constants are all assigned to the same mangled name
# ``lowerCAmelCase__`` yet referenced below as INIT_COMMON / END_COMMON /
# DECODER_PATTERNS etc.; the intended bindings must be confirmed upstream.
lowerCAmelCase__ : list = [
    # tf -> hf
    ('/', '.'),
    ('layer_', 'layers.'),
    ('kernel', 'weight'),
    ('beta', 'bias'),
    ('gamma', 'weight'),
    ('pegasus', 'model'),
]
# Substitutions applied after the block-specific ones (feed-forward renames).
lowerCAmelCase__ : list = [
    ('.output.dense', '.fc2'),
    ('intermediate.LayerNorm', 'final_layer_norm'),
    ('intermediate.dense', 'fc1'),
]
# Decoder-side rename table: common prefix rules + attention renames + suffix rules.
lowerCAmelCase__ : list = (
    INIT_COMMON
    + [
        ('attention.self.LayerNorm', 'self_attn_layer_norm'),
        ('attention.output.dense', 'self_attn.out_proj'),
        ('attention.self', 'self_attn'),
        ('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'),
        ('attention.encdec_output.dense', 'encoder_attn.out_proj'),
        ('attention.encdec', 'encoder_attn'),
        ('key', 'k_proj'),
        ('value', 'v_proj'),
        ('query', 'q_proj'),
        ('decoder.LayerNorm', 'decoder.layernorm_embedding'),
    ]
    + END_COMMON
)
# Encoder/remaining-weights rename table.
lowerCAmelCase__ : list = (
    INIT_COMMON
    + [
        ('embeddings.word_embeddings', 'shared.weight'),
        ('embeddings.position_embeddings', 'embed_positions.weight'),
        ('attention.self.LayerNorm', 'self_attn_layer_norm'),
        ('attention.output.dense', 'self_attn.output'),
        ('attention.self', 'self_attn.self'),
        ('encoder.LayerNorm', 'encoder.layernorm_embedding'),
    ]
    + END_COMMON
)
# TF keys (by suffix) that have no PyTorch counterpart and are skipped entirely.
lowerCAmelCase__ : list = [
    'encdec/key/bias',
    'encdec/query/bias',
    'encdec/value/bias',
    'self/key/bias',
    'self/query/bias',
    'self/value/bias',
    'encdec_output/dense/bias',
    'attention/output/dense/bias',
]
def a_(k, patterns):
    """Apply every ``(tf_name, hf_name)`` substring substitution to the key *k*.

    Fixes in review: the original declared two parameters both named
    ``lowerCamelCase`` (a SyntaxError), and bound each replacement to
    ``UpperCAmelCase__`` instead of rebinding ``k`` — so the input would have
    been returned unchanged.

    Args:
        k: Checkpoint key to rewrite.
        patterns: Iterable of ``(old, new)`` substring pairs, applied in order.

    Returns:
        The rewritten key.
    """
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
# Port a dict of TF BigBirdPegasus weights into a HF
# BigBirdPegasusForConditionalGeneration and return the loaded torch model.
# NOTE(review): this function is heavily machine-mangled — the signature
# declares two parameters with the same name (SyntaxError), locals are bound to
# ``UpperCAmelCase__`` but read as ``state_dict``/``mapping``/``new_k`` etc.,
# and it calls ``rename_state_dict_key`` which is not defined under that name
# in this file. Reconcile with the upstream conversion script before use.
def a_ ( lowerCamelCase , lowerCamelCase ):
    UpperCAmelCase__ = BigBirdPegasusConfig(**lowerCamelCase )
    UpperCAmelCase__ = BigBirdPegasusForConditionalGeneration(lowerCamelCase )
    UpperCAmelCase__ = torch_model.state_dict()
    UpperCAmelCase__ = {}
    # separating decoder weights
    UpperCAmelCase__ = {k: tf_weights[k] for k in tf_weights if k.startswith('pegasus/decoder' )}
    UpperCAmelCase__ = {k: tf_weights[k] for k in tf_weights if not k.startswith('pegasus/decoder' )}
    # Decoder weights use the decoder-specific rename table.
    for k, v in tqdm(decoder_weights.items() , 'tf -> hf conversion' ):
        UpperCAmelCase__ = [k.endswith(lowerCamelCase ) for ending in KEYS_TO_IGNORE]
        if any(lowerCamelCase ):
            continue
        UpperCAmelCase__ = DECODER_PATTERNS
        UpperCAmelCase__ = rename_state_dict_key(lowerCamelCase , lowerCamelCase )
        if new_k not in state_dict:
            raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
        # Dense/attention projection matrices are stored transposed in TF.
        if any(True if i in k else False for i in ['dense', 'query', 'key', 'value'] ):
            UpperCAmelCase__ = v.T
        UpperCAmelCase__ = torch.from_numpy(lowerCamelCase )
        assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''
    # Everything else (encoder + shared) uses the remaining-weights table;
    # position embeddings get special-cased below.
    for k, v in tqdm(remaining_weights.items() , 'tf -> hf conversion' ):
        UpperCAmelCase__ = [k.endswith(lowerCamelCase ) for ending in KEYS_TO_IGNORE]
        if any(lowerCamelCase ):
            continue
        UpperCAmelCase__ = REMAINING_PATTERNS
        UpperCAmelCase__ = rename_state_dict_key(lowerCamelCase , lowerCamelCase )
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
        if any(True if i in k else False for i in ['dense', 'query', 'key', 'value'] ):
            UpperCAmelCase__ = v.T
        UpperCAmelCase__ = torch.from_numpy(lowerCamelCase )
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''
    # Share the single TF position-embedding table across encoder and decoder.
    UpperCAmelCase__ = mapping['model.embed_positions.weight']
    UpperCAmelCase__ = mapping.pop('model.embed_positions.weight' )
    UpperCAmelCase__ , UpperCAmelCase__ = torch_model.load_state_dict(lowerCamelCase , strict=lowerCamelCase )
    # Keys tied/derived by HF are expected to be "missing"; anything else is an error.
    UpperCAmelCase__ = [
        k
        for k in missing
        if k
        not in [
            'final_logits_bias',
            'model.encoder.embed_tokens.weight',
            'model.decoder.embed_tokens.weight',
            'lm_head.weight',
        ]
    ]
    assert unexpected_missing == [], f'''no matches found for the following torch keys {unexpected_missing}'''
    assert extra == [], f'''no matches found for the following tf keys {extra}'''
    return torch_model
# Read every variable of a TF checkpoint into a {name: numpy array} dict,
# skipping bookkeeping variables such as 'global_step'.
# NOTE(review): locals look machine-mangled — values are bound to
# ``UpperCAmelCase__`` but read back as ``ignore_name``/``skip_key``/
# ``tf_weights``/``array``; confirm against the upstream script.
def a_ ( lowerCamelCase ):
    UpperCAmelCase__ = tf.train.list_variables(lowerCamelCase )
    UpperCAmelCase__ = {}
    UpperCAmelCase__ = ['global_step']
    for name, shape in tqdm(lowerCamelCase , desc='converting tf checkpoint to dict' ):
        UpperCAmelCase__ = any(pat in name for pat in ignore_name )
        if skip_key:
            continue
        UpperCAmelCase__ = tf.train.load_variable(lowerCamelCase , lowerCamelCase )
        UpperCAmelCase__ = array
    return tf_weights
# End-to-end conversion: load TF weights, build the torch model, save it.
# NOTE(review): machine-mangled — three parameters share one name
# (SyntaxError) and the called helpers ``get_tf_weights_as_numpy`` /
# ``convert_bigbird_pegasus`` are not defined under those names in this file.
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
    UpperCAmelCase__ = get_tf_weights_as_numpy(lowerCamelCase )
    UpperCAmelCase__ = convert_bigbird_pegasus(lowerCamelCase , lowerCamelCase )
    torch_model.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
    # CLI entry point: convert a TF BigBirdPegasus checkpoint to PyTorch.
    # NOTE(review): ``parser``, ``args``, ``config_update`` and
    # ``convert_bigbird_pegasus_ckpt_to_pytorch`` are never defined under those
    # names here (assignments go to the mangled ``lowerCAmelCase__``);
    # reconcile with the upstream script.
    lowerCAmelCase__ : argparse.ArgumentParser = argparse.ArgumentParser()
    parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
    parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.')
    lowerCAmelCase__ : argparse.Namespace = parser.parse_args()
    lowerCAmelCase__ : dict = {}
    convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 98 | """simple docstring"""
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
# Emit DEBUG-level records during the example-test run.
logging.basicConfig(level=logging.DEBUG)
# Root logger; the test below attaches a stdout handler to it.
lowerCAmelCase__ : logging.Logger = logging.getLogger()
def a_():
    """Parse the ``-f`` flag (passed by some notebook kernels) from sys.argv
    and return its value.

    Fix in review: the original bound the parser and parsed namespace to
    ``UpperCAmelCase__`` but read the undefined names ``parser``/``args``
    (NameError).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-f')
    args = parser.parse_args()
    return args.f
class snake_case ( __UpperCAmelCase ):
    """End-to-end tests for the DeeBERT example script (two-stage training,
    per-highway eval, and entropy-thresholded eval).

    NOTE(review): locals look machine-mangled — values are bound to
    ``UpperCAmelCase__`` but read back as ``n_gpu``/``args``/``result``;
    confirm against the upstream test file.
    """
    # Mirror test logging to stdout so pytest captures it.
    def __lowerCAmelCase ( self : int ):
        UpperCAmelCase__ = logging.StreamHandler(sys.stdout )
        logger.addHandler(lowerCamelCase__ )
    # Run run_glue_deebert.main() with the given argv and require every
    # reported metric to reach at least 0.666.
    def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : Union[str, Any] ):
        UpperCAmelCase__ = get_gpu_count()
        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0 ,'run_glue_deebert.py' )
            with patch.object(lowerCamelCase__ ,'argv' ,lowerCamelCase__ ):
                UpperCAmelCase__ = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(lowerCamelCase__ ,0.6_6_6 )
    # Train a two-stage DeeBERT model on MRPC fixtures, then evaluate it with
    # per-highway metrics and again with an early-exit entropy threshold.
    @slow
    @require_torch_non_multi_gpu
    def __lowerCAmelCase ( self : List[str] ):
        UpperCAmelCase__ = '\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n '.split()
        self.run_and_check(lowerCamelCase__ )
        UpperCAmelCase__ = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
        self.run_and_check(lowerCamelCase__ )
        UpperCAmelCase__ = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
        self.run_and_check(lowerCamelCase__ )
| 98 | 1 |
"""simple docstring"""
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class snake_case :
    """Histogram-equalization style contrast stretch over a grayscale image.

    NOTE(review): identifiers look machine-mangled — values are bound to
    ``UpperCAmelCase__`` but read back as ``self.img``/``x``/``prk``/``last``/
    ``num`` etc., and ``int(last % last )`` is always 0; reconcile with the
    original implementation before relying on this code.
    """
    def __init__( self : Tuple ):
        # Input/output image buffers, cumulative distribution state, and the
        # per-intensity remap table built by stretch().
        UpperCAmelCase__ = ''
        UpperCAmelCase__ = ''
        UpperCAmelCase__ = []
        UpperCAmelCase__ = 0
        UpperCAmelCase__ = 256
        UpperCAmelCase__ = 0
        UpperCAmelCase__ = 0
        UpperCAmelCase__ = 0
        UpperCAmelCase__ = 0
    # Load the image, build the intensity histogram, derive the remap table,
    # rewrite each pixel through it, and save the result.
    def __lowerCAmelCase ( self : str ,lowerCamelCase__ : str ):
        UpperCAmelCase__ = cva.imread(lowerCamelCase__ ,0 )
        UpperCAmelCase__ = copy.deepcopy(self.img )
        UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = plt.hist(self.img.ravel() ,256 ,[0, 256] ,label='x' )
        UpperCAmelCase__ = np.sum(lowerCamelCase__ )
        for i in range(len(lowerCamelCase__ ) ):
            UpperCAmelCase__ = x[i] / self.k
            self.sk += prk
            UpperCAmelCase__ = (self.L - 1) * self.sk
            if self.rem != 0:
                UpperCAmelCase__ = int(last % last )
            UpperCAmelCase__ = int(last + 1 if self.rem >= 0.5 else last )
            self.last_list.append(lowerCamelCase__ )
            UpperCAmelCase__ = int(np.ma.count(self.img ) / self.img[1].size )
            UpperCAmelCase__ = self.img[1].size
        # Remap every pixel through the lookup table built above.
        for i in range(self.number_of_cols ):
            for j in range(self.number_of_rows ):
                UpperCAmelCase__ = self.img[j][i]
                if num != self.last_list[num]:
                    UpperCAmelCase__ = self.last_list[num]
        cva.imwrite('output_data/output.jpg' ,self.img )
    # Plot the (stretched) image's intensity histogram.
    def __lowerCAmelCase ( self : Optional[Any] ):
        plt.hist(self.img.ravel() ,256 ,[0, 256] )
    # Display input and output images side by side for 5 seconds.
    def __lowerCAmelCase ( self : int ):
        cva.imshow('Output-Image' ,self.img )
        cva.imshow('Input-Image' ,self.original_image )
        cva.waitKey(5_000 )
        cva.destroyAllWindows()
if __name__ == "__main__":
    # Resolve the demo input image path relative to this script.
    # NOTE(review): ``ConstantStretch``, ``stretcher`` and ``file_path`` are
    # never defined under those names here (the class above is ``snake_case``
    # and assignments go to ``lowerCAmelCase__``); also ``os.path.basename``
    # looks like it should be ``os.path.dirname``. Confirm upstream.
    lowerCAmelCase__ : str = os.path.join(os.path.basename(__file__), 'image_data/input.jpg')
    lowerCAmelCase__ : str = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
| 98 | """simple docstring"""
import argparse
# Path of the docs JS file whose version pins get rewritten at release time.
# (Annotation corrected: the value is a str, and `List` was not imported here.)
lowerCAmelCase__ : str = 'docs/source/_static/js/custom.js'
def a_(version, js_path='docs/source/_static/js/custom.js'):
    """Pin a new release version inside the docs' ``custom.js``.

    Rewrites the ``const stableVersion = ...`` line and appends a
    ``"vX": "vX"`` entry to the ``versionMapping`` object.

    Fixes in review: the original bound its locals to ``UpperCAmelCase__`` but
    read the undefined names ``lines``/``index``/``version`` (NameError), and
    wrote the *filename string* back to the file instead of the edited lines.
    A ``js_path`` parameter (defaulting to the original hard-coded path) makes
    the target file overridable.

    Args:
        version: Release version string, without the leading "v".
        js_path: Path to the custom.js file to rewrite in place.
    """
    with open(js_path, encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    index = 0
    # First let's put the right version
    while not lines[index].startswith('const stableVersion ='):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'
    # Then update the dictionary
    while not lines[index].startswith('const versionMapping = {'):
        index += 1
    # We go until the end
    while not lines[index].startswith('}'):
        index += 1
    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'
    with open(js_path, 'w', encoding='utf-8', newline='\n') as f:
        f.writelines(lines)
if __name__ == "__main__":
    # CLI entry point: `python <script> --version <release>`.
    # Fixes in review: the original bound the parser/namespace to
    # ``lowerCAmelCase__`` but read the undefined names ``parser``/``args``,
    # and called ``update_custom_js`` which does not exist in this file (the
    # updater above is named ``a_``).
    parser = argparse.ArgumentParser()
    parser.add_argument('--version', help='Release version.')
    args = parser.parse_args()
    a_(args.version)
| 98 | 1 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
lowerCAmelCase__ : List[str] = logging.get_logger(__name__)
@dataclass
class snake_case ( __UpperCAmelCase ):
    """TensorFlow-specific benchmark arguments: device selection (CPU/GPU/TPU),
    eager-vs-XLA execution, and the tf.distribute strategy derived from them.

    NOTE(review): ``__init__`` looks machine-mangled — values are bound to
    ``UpperCAmelCase__`` but read back as ``positive_arg``/``kwargs`` and the
    popped values are never assigned to ``self``; verify against upstream.
    """
    # Deprecated negated CLI flags translated into positive fields in __init__.
    snake_case__ = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]
    def __init__( self : Optional[int] ,**lowerCamelCase__ : Tuple ):
        # Map each deprecated `no_*` kwarg to its positive counterpart,
        # warning the caller once per occurrence.
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                UpperCAmelCase__ = deprecated_arg[3:]
                UpperCAmelCase__ = not kwargs.pop(lowerCamelCase__ )
                logger.warning(
                    f'''{deprecated_arg} is depreciated. Please use --no-{positive_arg} or'''
                    f''' {positive_arg}={kwargs[positive_arg]}''' )
        # Pull TF-specific settings out of kwargs before delegating the rest.
        UpperCAmelCase__ = kwargs.pop('tpu_name' ,self.tpu_name )
        UpperCAmelCase__ = kwargs.pop('device_idx' ,self.device_idx )
        UpperCAmelCase__ = kwargs.pop('eager_mode' ,self.eager_mode )
        UpperCAmelCase__ = kwargs.pop('use_xla' ,self.use_xla )
        super().__init__(**lowerCamelCase__ )
    # Dataclass fields backing the CLI options above.
    snake_case__ = field(
        default=__UpperCAmelCase , metadata={"help": "Name of TPU"} , )
    snake_case__ = field(
        default=0 , metadata={"help": "CPU / GPU device index. Defaults to 0."} , )
    snake_case__ = field(default=__UpperCAmelCase , metadata={"help": "Benchmark models in eager model."} )
    snake_case__ = field(
        default=__UpperCAmelCase , metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        } , )
    # Resolve (and cache) the TPU cluster, or None when no TPU is reachable.
    @cached_property
    def __lowerCAmelCase ( self : Union[str, Any] ):
        requires_backends(self ,['tf'] )
        UpperCAmelCase__ = None
        if self.tpu:
            try:
                if self.tpu_name:
                    UpperCAmelCase__ = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
                else:
                    UpperCAmelCase__ = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                UpperCAmelCase__ = None
        return tpu
    # Build (and cache) the tf.distribute strategy for the selected device.
    @cached_property
    def __lowerCAmelCase ( self : Dict ):
        requires_backends(self ,['tf'] )
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu )
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
            UpperCAmelCase__ = tf.distribute.TPUStrategy(self._setup_tpu )
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx] ,'GPU' )
                UpperCAmelCase__ = tf.distribute.OneDeviceStrategy(device=f'''/gpu:{self.device_idx}''' )
            else:
                tf.config.set_visible_devices([] ,'GPU' ) # disable GPU
                UpperCAmelCase__ = tf.distribute.OneDeviceStrategy(device=f'''/cpu:{self.device_idx}''' )
        return strategy
    # True when a TPU cluster was resolved.
    @property
    def __lowerCAmelCase ( self : int ):
        requires_backends(self ,['tf'] )
        return self._setup_tpu is not None
    # The resolved distribution strategy.
    @property
    def __lowerCAmelCase ( self : Tuple ):
        requires_backends(self ,['tf'] )
        return self._setup_strategy
    # Physical GPUs visible to TensorFlow.
    @property
    def __lowerCAmelCase ( self : int ):
        requires_backends(self ,['tf'] )
        return tf.config.list_physical_devices('GPU' )
    # Number of usable GPUs (0 when CUDA is disabled).
    @property
    def __lowerCAmelCase ( self : Tuple ):
        requires_backends(self ,['tf'] )
        if self.cuda:
            return len(self.gpu_list )
        return 0
    # Whether at least one GPU is usable.
    @property
    def __lowerCAmelCase ( self : Tuple ):
        return self.n_gpu > 0
| 98 | """simple docstring"""
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def a_ ( lowerCamelCase ):
    """Squared Euclidean norm: the dot product of the vector with itself."""
    vec = lowerCamelCase
    return np.dot(vec, vec)
class snake_case :
    """Support-vector classifier solved via Wolfe's dual with SciPy's minimize.

    NOTE(review): identifiers look machine-mangled — values are bound to
    ``UpperCAmelCase__`` but read back as ``self.regularization``/``self.gamma``
    etc., ``__init__`` declares three keyword-only parameters with the same
    name (SyntaxError), and ``self.__linear``/``self.__rbf`` do not match the
    mangled method names below. Reconcile with the original before use.
    """
    def __init__( self : int ,*,
        lowerCamelCase__ : float = np.inf ,lowerCamelCase__ : str = "linear" ,lowerCamelCase__ : float = 0.0 ,):
        # Store the soft-margin constant and pick the kernel implementation.
        UpperCAmelCase__ = regularization
        UpperCAmelCase__ = gamma
        if kernel == "linear":
            UpperCAmelCase__ = self.__linear
        elif kernel == "rbf":
            # RBF requires a strictly positive numeric gamma.
            if self.gamma == 0:
                raise ValueError('rbf kernel requires gamma' )
            if not isinstance(self.gamma ,(float, int) ):
                raise ValueError('gamma must be float or int' )
            if not self.gamma > 0:
                raise ValueError('gamma must be > 0' )
            UpperCAmelCase__ = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklear: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            UpperCAmelCase__ = f'''Unknown kernel: {kernel}'''
            raise ValueError(lowerCamelCase__ )
    # Linear kernel: plain dot product of the two vectors.
    def __lowerCAmelCase ( self : List[Any] ,lowerCamelCase__ : ndarray ,lowerCamelCase__ : ndarray ):
        return np.dot(lowerCamelCase__ ,lowerCamelCase__ )
    # RBF kernel: exp(-gamma * ||v1 - v2||^2).
    def __lowerCAmelCase ( self : Any ,lowerCamelCase__ : ndarray ,lowerCamelCase__ : ndarray ):
        return np.exp(-(self.gamma * norm_squared(vectora - vectora )) )
    # Fit: solve the dual QP for the Lagrange multipliers, then estimate the
    # bias as the mean margin offset over all training points.
    def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : list[ndarray] ,lowerCamelCase__ : ndarray ):
        UpperCAmelCase__ = observations
        UpperCAmelCase__ = classes
        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        # constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        # constraint: self.C >= ln >= 0
        # and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations
        ((UpperCAmelCase__) , ) = np.shape(lowerCamelCase__ )
        def to_minimize(lowerCamelCase__ : ndarray ) -> float:
            # Negated dual objective (scipy minimizes; the dual is maximized).
            UpperCAmelCase__ = 0
            ((UpperCAmelCase__) , ) = np.shape(lowerCamelCase__ )
            for i in range(lowerCamelCase__ ):
                for j in range(lowerCamelCase__ ):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i] ,observations[j] )
                    )
            return 1 / 2 * s - sum(lowerCamelCase__ )
        # Equality constraint sum_n(ln*yn) = 0 and box bounds 0 <= ln <= C.
        UpperCAmelCase__ = LinearConstraint(lowerCamelCase__ ,0 ,0 )
        UpperCAmelCase__ = Bounds(0 ,self.regularization )
        UpperCAmelCase__ = minimize(
            lowerCamelCase__ ,np.ones(lowerCamelCase__ ) ,bounds=lowerCamelCase__ ,constraints=[ly_contraint] ).x
        UpperCAmelCase__ = l_star
        # calculating mean offset of separation plane to points
        UpperCAmelCase__ = 0
        for i in range(lowerCamelCase__ ):
            for j in range(lowerCamelCase__ ):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i] ,observations[j] )
        UpperCAmelCase__ = s / n
    # Predict the class (+1 / -1) of a single observation via the kernelized
    # decision function sign(sum_n ln*yn*K(xn, x) + b).
    def __lowerCAmelCase ( self : int ,lowerCamelCase__ : ndarray ):
        UpperCAmelCase__ = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n] ,lowerCamelCase__ )
            for n in range(len(self.classes ) ) )
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
    # Execute the docstring examples in this module as tests.
    import doctest
    doctest.testmod()
| 98 | 1 |
"""simple docstring"""
from pathlib import Path
import fire
from tqdm import tqdm
def a_(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None):
    """Download a translation dataset and dump each split to plain-text
    ``<split>.source`` / ``<split>.target`` files.

    Fixes in review: the original declared four parameters all named
    ``lowerCamelCase`` (a SyntaxError), bound every local to ``UpperCAmelCase__``
    while reading the intended names (``pair``, ``ds``, ``save_dir`` ...), and
    never closed the per-split file handles (now handled with ``with``).

    Args:
        src_lang: Source-language code used to pick the translation field.
        tgt_lang: Target-language code.
        dataset: datasets-hub dataset name (e.g. "wmt16").
        save_dir: Output directory; defaults to "<dataset>-<src>-<tgt>".
    """
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError('run pip install datasets')
    pair = f'{src_lang}-{tgt_lang}'
    print(f'Converting {dataset}-{pair}')
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f'{dataset}-{pair}'
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)
    for split in ds.keys():
        print(f'Splitting {split} with {ds[split].num_rows} records')
        # to save to val.source, val.target like summary datasets
        fn = 'val' if split == 'validation' else split
        src_path = save_dir.joinpath(f'{fn}.source')
        tgt_path = save_dir.joinpath(f'{fn}.target')
        with src_path.open('w+') as src_fp, tgt_path.open('w+') as tgt_fp:
            # reader is the bottleneck so writing one record at a time doesn't slow things down
            for x in tqdm(ds[split]):
                ex = x['translation']
                src_fp.write(ex[src_lang] + '\n')
                tgt_fp.write(ex[tgt_lang] + '\n')
    print(f'Saved {dataset} dataset to {save_dir}')
if __name__ == "__main__":
    # CLI entry point via python-fire.
    # NOTE(review): ``download_wmt_dataset`` is not defined in this file — the
    # download function above was renamed to ``a_``; confirm the intended name.
    fire.Fire(download_wmt_dataset)
| 98 | """simple docstring"""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
lowerCAmelCase__ : List[Any] = '\\n\n'
lowerCAmelCase__ : Tuple = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
lowerCAmelCase__ : str = '\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... 
input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n'
# NOTE(review): this block appears machine-mangled and cannot run as written:
# both methods share the name `__lowerCAmelCase` (the second definition shadows
# the first, so the metric exposes no usable `_info`/`_compute` pair), the
# second signature repeats the parameter name `lowerCamelCase__` (a
# SyntaxError), and every intermediate value is discarded into the throwaway
# local `UpperCAmelCase__` while later lines read names that are never bound
# (`device`, `model`, `tokenizer`, `existing_special_tokens`, `encodings`,
# `encoded_texts`, `attn_masks`, `ppls`, `loss_fct`, `batch_size`, ...).
# `torch.intaa` / `torch.expa` look like corrupted `torch.int64` / `torch.exp`
# — TODO confirm against the upstream perplexity metric before repairing.
# Code left byte-identical; only comments added.
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case ( datasets.Metric ):
    """simple docstring"""

    def __lowerCAmelCase ( self : str ):
        # Metric description: a single string feature per example.
        return datasets.MetricInfo(
            description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
                {
                    'input_texts': datasets.Value('string' ),
                } ) ,reference_urls=['https://huggingface.co/docs/transformers/perplexity'] ,)

    # NOTE(review): duplicate parameter names below make this `def` a SyntaxError.
    def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : int = 16 ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : List[str]=None ):
        # Device selection: explicit "gpu" is mapped to CUDA, otherwise
        # fall back to CUDA when available.
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                UpperCAmelCase__ = 'cuda'
        else:
            UpperCAmelCase__ = 'cuda' if torch.cuda.is_available() else 'cpu'
        UpperCAmelCase__ = AutoModelForCausalLM.from_pretrained(lowerCamelCase__ )
        UpperCAmelCase__ = model.to(lowerCamelCase__ )
        UpperCAmelCase__ = AutoTokenizer.from_pretrained(lowerCamelCase__ )
        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            UpperCAmelCase__ = list(tokenizer.special_tokens_map_extended.values() )
            # check that the model already has at least one special token defined
            assert (
                len(lowerCamelCase__ ) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({'pad_token': existing_special_tokens[0]} )
        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            UpperCAmelCase__ = model.config.max_length - 1
        else:
            UpperCAmelCase__ = model.config.max_length
        # Tokenize the whole corpus at once, padded/truncated to max length.
        UpperCAmelCase__ = tokenizer(
            lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ,padding=lowerCamelCase__ ,truncation=lowerCamelCase__ ,max_length=lowerCamelCase__ ,return_tensors='pt' ,return_attention_mask=lowerCamelCase__ ,).to(lowerCamelCase__ )
        UpperCAmelCase__ = encodings['input_ids']
        UpperCAmelCase__ = encodings['attention_mask']
        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1 ) ,1 ) ), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1 ) ,2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
        UpperCAmelCase__ = []
        UpperCAmelCase__ = CrossEntropyLoss(reduction='none' )
        # Batched perplexity: exp of the per-sequence mean token NLL.
        for start_index in logging.tqdm(range(0 ,len(lowerCamelCase__ ) ,lowerCamelCase__ ) ):
            UpperCAmelCase__ = min(start_index + batch_size ,len(lowerCamelCase__ ) )
            UpperCAmelCase__ = encoded_texts[start_index:end_index]
            UpperCAmelCase__ = attn_masks[start_index:end_index]
            if add_start_token:
                # Prepend a BOS column to ids and mask.
                UpperCAmelCase__ = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(lowerCamelCase__ )
                UpperCAmelCase__ = torch.cat([bos_tokens_tensor, encoded_batch] ,dim=1 )
                UpperCAmelCase__ = torch.cat(
                    [torch.ones(bos_tokens_tensor.size() ,dtype=torch.intaa ).to(lowerCamelCase__ ), attn_mask] ,dim=1 )
            UpperCAmelCase__ = encoded_batch
            with torch.no_grad():
                UpperCAmelCase__ = model(lowerCamelCase__ ,attention_mask=lowerCamelCase__ ).logits
            # Shift logits/labels/mask for next-token prediction.
            UpperCAmelCase__ = out_logits[..., :-1, :].contiguous()
            UpperCAmelCase__ = labels[..., 1:].contiguous()
            UpperCAmelCase__ = attn_mask[..., 1:].contiguous()
            UpperCAmelCase__ = torch.expa(
                (loss_fct(shift_logits.transpose(1 ,2 ) ,lowerCamelCase__ ) * shift_attention_mask_batch).sum(1 )
                / shift_attention_mask_batch.sum(1 ) )
            ppls += perplexity_batch.tolist()
        return {"perplexities": ppls, "mean_perplexity": np.mean(lowerCamelCase__ )}
| 98 | 1 |
"""simple docstring"""
import heapq
def a_(graph):
    """Greedy approximation of a minimum vertex cover.

    Repeatedly picks the vertex with the most remaining edges, adds it to the
    cover, and removes it from every adjacency list until no edges remain.

    :param graph: mapping ``vertex -> list of adjacent vertices``
    :return: set of vertices forming the cover

    (Fix: the obfuscated original assigned every value to a throwaway local,
    leaving ``queue``, ``argmax`` and ``index`` unbound at their use sites.)
    """
    # heapq is a min-heap, so ranks are stored negated: the heap root is the
    # vertex with the highest remaining degree.
    queue = []
    for key, value in graph.items():
        # O(log(n)) push of [rank, (vertex, adjacency-list)]
        heapq.heappush(queue, [-1 * len(value), (key, value)])
    chosen_vertices = set()
    # Loop while the queue is non-empty and edges remain
    # (queue[0][0] is the negated rank of the highest-degree vertex).
    while queue and queue[0][0] != 0:
        # Extract the vertex with maximum rank and add it to the cover.
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)
        # Remove every arc adjacent to argmax.
        for elem in queue:
            # Vertices with no remaining neighbours can be skipped.
            if elem[0] == 0:
                continue
            # If argmax is reachable from this vertex, drop it from the
            # adjacency list and shrink the (negated) rank by one.
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # Restore the heap invariant after the in-place rank edits.
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Fix: the obfuscated original bound the demo graph to a throwaway name and
    # called the nonexistent ``greedy_min_vertex_cover``; the cover routine in
    # this module is defined as ``a_``.
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"""Minimum vertex cover:\n{a_(graph)}""")
| 98 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger (bound to an obfuscated throwaway name).
lowerCAmelCase__ : int = logging.get_logger(__name__)
# Checkpoint name -> hosted config URL for released XGLM models.
lowerCAmelCase__ : str = {
    'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/config.json',
    # See all XGLM models at https://huggingface.co/models?filter=xglm
}
class snake_case ( PretrainedConfig ):
    """Configuration for XGLM decoder models.

    Stores the model hyper-parameters and forwards the special-token ids to
    :class:`PretrainedConfig` (imported at the top of this file).

    Fixes over the mangled original: the three class attributes were all
    assigned to the same throwaway name (only the last survived), every
    ``__init__`` parameter shared one name (a SyntaxError), and the body
    discarded each value instead of setting it on ``self``.
    """

    model_type = "xglm"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=256_008,
        max_position_embeddings=2_048,
        d_model=1_024,
        ffn_dim=4_096,
        num_layers=24,
        attention_heads=16,
        activation_function="gelu",
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        layerdrop=0.0,
        init_std=0.02,
        scale_embedding=True,
        use_cache=True,
        decoder_start_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        # Model shape / vocabulary.
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        # Activations and regularisation.
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 98 | 1 |
"""simple docstring"""
import math
import sys
def a_(number):
    """Return the minimum count of perfect squares summing to ``number``.

    Bottom-up DP: ``answers[i] = 1 + min(answers[i - j*j])`` over all squares
    ``j*j <= i``.  By this module's convention ``a_(0) == 1`` (0 is written as
    the single square 0**2).

    :raises ValueError: if ``number`` is not a natural number.

    (Fix: the obfuscated original never bound ``answers``/``answer``/``root``,
    so every later read raised NameError.)
    """
    if number != int(number):
        raise ValueError('the value of input must be a natural number')
    if number < 0:
        raise ValueError('the value of input must not be a negative number')
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0  # zero needs no squares
    for i in range(1, number + 1):
        answer = sys.maxsize
        # Largest j with j*j <= i (math.isqrt is exact, unlike float sqrt).
        root = int(math.isqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 98 | """simple docstring"""
import math
def a_(apparent_power, power_factor):
    """Return the real (active) power for a given apparent power and power factor.

    :param apparent_power: apparent power of the circuit
    :param power_factor: cos(phi), must be a number in [-1, 1]
    :raises ValueError: if ``power_factor`` is not a valid float in [-1, 1]

    (Fix: the obfuscated original declared both parameters with the same name —
    a SyntaxError — while the body read ``apparent_power``/``power_factor``.)
    """
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('power_factor must be a valid float value between -1 and 1.')
    return apparent_power * power_factor
def a_(apparent_power, power_factor):
    """Return the reactive power: ``apparent_power * sqrt(1 - power_factor**2)``.

    :param apparent_power: apparent power of the circuit
    :param power_factor: cos(phi), must be a number in [-1, 1]
    :raises ValueError: if ``power_factor`` is not a valid float in [-1, 1]

    (Fix: duplicate parameter names in the obfuscated original made this a
    SyntaxError; the body already read the names restored here.)
    """
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('power_factor must be a valid float value between -1 and 1.')
    return apparent_power * math.sqrt(1 - power_factor**2)
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 98 | 1 |
"""simple docstring"""
from math import isqrt
def a_(number):
    """Return True if ``number`` is prime (trial division up to isqrt).

    Fix: the original returned True for 0 and 1 (the divisor range is empty),
    which are not prime; the explicit ``number > 1`` guard handles that edge
    case while preserving the result for every prime candidate.
    """
    return number > 1 and all(
        number % divisor != 0 for divisor in range(2, isqrt(number) + 1)
    )
def a_(max_prime=10**6):
    """Count primes below ``max_prime`` of the form ``n**3 - (n-1)**3``.

    Cube differences are 3n²-3n+1: 7, 19, 37, 61, 91, ... — generated
    incrementally by adding ``6 * cube_index`` each step.

    (Fix: the obfuscated original called a nonexistent ``is_prime`` and never
    bound ``max_prime``; a self-contained primality helper is used so this
    function no longer depends on a sibling that was renamed away.)
    """

    def _is_prime(candidate):
        # Trial division; candidates here are always >= 7.
        return candidate > 1 and all(
            candidate % divisor != 0 for divisor in range(2, isqrt(candidate) + 1)
        )

    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += _is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
if __name__ == "__main__":
    # Fix: the original printed ``solution()``, a name that does not exist in
    # this module — the solver above is bound to ``a_``.
    print(f"""{a_() = }""")
| 98 | """simple docstring"""
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
# Static (stable-diffusion, HF Diffusers) name pairs for the UNet.
# (Fix: the obfuscated original assigned every list and every prefix variable
# to a throwaway name, so the ``append`` calls below read unbound names.)
unet_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ('time_embed.0.weight', 'time_embedding.linear_1.weight'),
    ('time_embed.0.bias', 'time_embedding.linear_1.bias'),
    ('time_embed.2.weight', 'time_embedding.linear_2.weight'),
    ('time_embed.2.bias', 'time_embedding.linear_2.bias'),
    ('input_blocks.0.0.weight', 'conv_in.weight'),
    ('input_blocks.0.0.bias', 'conv_in.bias'),
    ('out.0.weight', 'conv_norm_out.weight'),
    ('out.0.bias', 'conv_norm_out.bias'),
    ('out.2.weight', 'conv_out.weight'),
    ('out.2.bias', 'conv_out.bias'),
]
unet_conversion_map_resnet = [
    # (stable-diffusion, HF Diffusers)
    ('in_layers.0', 'norm1'),
    ('in_layers.2', 'conv1'),
    ('out_layers.0', 'norm2'),
    ('out_layers.3', 'conv2'),
    ('emb_layers.1', 'time_emb_proj'),
    ('skip_connection', 'conv_shortcut'),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
hf_mid_atn_prefix = 'mid_block.attentions.0.'
sd_mid_atn_prefix = 'middle_block.1.'
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def a_(unet_state_dict):
    """Rename an HF Diffusers UNet state dict to the stable-diffusion layout.

    Builds an identity key mapping, rewrites each key via the module-level
    ``unet_conversion_map`` tables (HF name -> SD name), then re-keys the
    state dict.

    (Fix: the obfuscated original never bound ``mapping``/``new_state_dict``,
    so every later read raised NameError.)
    """
    # buyer beware: this is a *brittle* function,
    # and correct output requires that all of these pieces interact in
    # the exact order in which I have arranged them.
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
# ================#
# VAE Conversion #
# ================#
# (stable-diffusion, HF Diffusers) name pairs for the VAE.
# (Fix: as with the UNet tables, the obfuscated original discarded every list
# and prefix variable into throwaway names the ``append`` calls never saw.)
vae_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ('nin_shortcut', 'conv_shortcut'),
    ('norm_out', 'conv_norm_out'),
    ('mid.attn_1.', 'mid_block.attentions.0.'),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
    # (stable-diffusion, HF Diffusers)
    ('norm.', 'group_norm.'),
    ('q.', 'query.'),
    ('k.', 'key.'),
    ('v.', 'value.'),
    ('proj_out.', 'proj_attn.'),
]
def a_(w):
    """Reshape an HF linear weight ``(out, in)`` into an SD conv2d weight
    ``(out, in, 1, 1)`` by appending two singleton dimensions.

    (Fix: the obfuscated original named the parameter differently from the
    ``w`` the body read, raising NameError on every call.)
    """
    # convert HF linear weights to SD conv2d weights
    return w.reshape(*w.shape, 1, 1)
def a_(vae_state_dict):
    """Rename an HF Diffusers VAE state dict to the stable-diffusion layout.

    Keys are rewritten via the module-level ``vae_conversion_map`` tables;
    attention q/k/v/proj_out weights of the mid block are additionally
    reshaped from linear ``(out, in)`` to conv2d ``(out, in, 1, 1)``.

    (Fixes: the obfuscated original never bound ``mapping``/``new_state_dict``/
    ``weights_to_convert``; the reshape is inlined here because the sibling
    helper was renamed away and is unresolvable at module level.)
    """
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ['q', 'k', 'v', 'proj_out']
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f'''mid.attn_1.{weight_name}.weight''' in k:
                print(f'''Reshaping {k} for SD format''')
                # HF linear weight -> SD 1x1 conv weight.
                new_state_dict[k] = v.reshape(*v.shape, 1, 1)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
# (stable-diffusion, HF Diffusers) name pairs for the CLIP text encoder.
# (Fix: the obfuscated original assigned ``protected``/``textenc_pattern``/
# ``code2idx`` to throwaway names that later reads never saw.)
textenc_conversion_lst = [
    # (stable-diffusion, HF Diffusers)
    ('resblocks.', 'text_model.encoder.layers.'),
    ('ln_1', 'layer_norm1'),
    ('ln_2', 'layer_norm2'),
    ('.c_fc.', '.fc1.'),
    ('.c_proj.', '.fc2.'),
    ('.attn', '.self_attn'),
    ('ln_final.', 'transformer.text_model.final_layer_norm.'),
    ('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'),
    ('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight'),
]
# Escaped HF name -> SD name, plus one regex matching any HF name.
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile('|'.join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {'q': 0, 'k': 1, 'v': 2}
def a_(text_enc_dict):
    """Convert a v2-style HF CLIP text-encoder state dict to the original
    stable-diffusion layout, fusing separate q/k/v projections into single
    ``in_proj_weight`` / ``in_proj_bias`` tensors.

    :raises Exception: if any q/k/v tensor of a captured layer is missing.
    """
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith('.self_attn.q_proj.weight')
            or k.endswith('.self_attn.k_proj.weight')
            or k.endswith('.self_attn.v_proj.weight')
        ):
            k_pre = k[: -len('.q_proj.weight')]
            # Single character 'q'/'k'/'v' identifying which projection this is.
            k_code = k[-len('q_proj.weight')]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue
        if (
            k.endswith('.self_attn.q_proj.bias')
            or k.endswith('.self_attn.k_proj.bias')
            or k.endswith('.self_attn.v_proj.bias')
        ):
            k_pre = k[: -len('.q_proj.bias')]
            k_code = k[-len('q_proj.bias')]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v
    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing')
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + '.in_proj_weight'] = torch.cat(tensors)
    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing')
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + '.in_proj_bias'] = torch.cat(tensors)
    return new_state_dict
def a_(text_enc_dict):
    """Identity conversion for v1 text-encoder state dicts (no renaming needed).

    (Fix: the obfuscated original named the parameter differently from the
    ``text_enc_dict`` it returned, raising NameError on every call.)
    """
    return text_enc_dict
if __name__ == "__main__":
    # NOTE(review): this entry point is unrunnable as written — the converter
    # functions in this file were all (re)defined under the single name `a_`,
    # and every assignment below binds the throwaway `lowerCAmelCase__`, so
    # `parser`, `args`, `unet_path`, `unet_state_dict`, `convert_unet_state_dict`,
    # `convert_vae_state_dict`, `convert_text_enc_state_dict(_vaa)`,
    # `text_enc_dict`, `state_dict`, etc. are unbound at their use sites.
    # Code kept byte-identical; only comments added.
    lowerCAmelCase__ : Tuple = argparse.ArgumentParser()
    parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
    parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
    parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
    parser.add_argument(
        '--use_safetensors', action='store_true', help='Save weights use safetensors, default is ckpt.'
    )
    lowerCAmelCase__ : Optional[int] = parser.parse_args()
    assert args.model_path is not None, "Must provide a model path!"
    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
    # Path for safetensors
    lowerCAmelCase__ : Tuple = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.safetensors')
    lowerCAmelCase__ : List[str] = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.safetensors')
    lowerCAmelCase__ : int = osp.join(args.model_path, 'text_encoder', 'model.safetensors')
    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        lowerCAmelCase__ : Union[str, Any] = load_file(unet_path, device='cpu')
    else:
        lowerCAmelCase__ : str = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.bin')
        lowerCAmelCase__ : Dict = torch.load(unet_path, map_location='cpu')
    if osp.exists(vae_path):
        lowerCAmelCase__ : Optional[Any] = load_file(vae_path, device='cpu')
    else:
        lowerCAmelCase__ : Optional[int] = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.bin')
        lowerCAmelCase__ : List[str] = torch.load(vae_path, map_location='cpu')
    if osp.exists(text_enc_path):
        lowerCAmelCase__ : Tuple = load_file(text_enc_path, device='cpu')
    else:
        lowerCAmelCase__ : Any = osp.join(args.model_path, 'text_encoder', 'pytorch_model.bin')
        lowerCAmelCase__ : Any = torch.load(text_enc_path, map_location='cpu')
    # Convert the UNet model
    lowerCAmelCase__ : Any = convert_unet_state_dict(unet_state_dict)
    lowerCAmelCase__ : Dict = {'model.diffusion_model.' + k: v for k, v in unet_state_dict.items()}
    # Convert the VAE model
    lowerCAmelCase__ : List[Any] = convert_vae_state_dict(vae_state_dict)
    lowerCAmelCase__ : str = {'first_stage_model.' + k: v for k, v in vae_state_dict.items()}
    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    lowerCAmelCase__ : List[Any] = 'text_model.encoder.layers.22.layer_norm2.bias' in text_enc_dict
    if is_vaa_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        lowerCAmelCase__ : Tuple = {'transformer.' + k: v for k, v in text_enc_dict.items()}
        lowerCAmelCase__ : List[str] = convert_text_enc_state_dict_vaa(text_enc_dict)
        lowerCAmelCase__ : str = {'cond_stage_model.model.' + k: v for k, v in text_enc_dict.items()}
    else:
        lowerCAmelCase__ : Optional[Any] = convert_text_enc_state_dict(text_enc_dict)
        lowerCAmelCase__ : Optional[Any] = {'cond_stage_model.transformer.' + k: v for k, v in text_enc_dict.items()}
    # Put together new checkpoint
    lowerCAmelCase__ : List[Any] = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        lowerCAmelCase__ : int = {k: v.half() for k, v in state_dict.items()}
    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        lowerCAmelCase__ : List[str] = {'state_dict': state_dict}
        torch.save(state_dict, args.checkpoint_path)
| 98 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
# Module-level logger for this image processor (bound to an obfuscated name).
lowerCAmelCase__ : List[str] = logging.get_logger(__name__)
# NOTE(review): this block appears machine-mangled and cannot run as written:
# all six methods below share the name `__lowerCAmelCase`/`__init__` pattern
# where each `__lowerCAmelCase` definition shadows the previous one, every
# signature repeats the parameter name `lowerCamelCase__` (a SyntaxError),
# and `__init__` discards each computed value into the throwaway local
# `UpperCAmelCase__` instead of setting `self.do_resize`, `self.size`, etc.,
# which the `preprocess`-style method later reads. The base class name
# `__UpperCAmelCase` is also unbound (presumably BaseImageProcessor — TODO
# confirm). Code kept byte-identical; only comments added.
class snake_case ( __UpperCAmelCase ):
    """simple docstring"""

    snake_case__ = ["pixel_values"]

    # Defaults: shortest-edge-256 resize, 224x224 center crop, 1/255 rescale,
    # ImageNet-standard mean/std normalisation.
    def __init__( self : Tuple ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Optional[Dict[str, int]] = None ,lowerCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Dict[str, int] = None ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Union[int, float] = 1 / 255 ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Optional[Union[float, List[float]]] = None ,lowerCamelCase__ : Optional[Union[float, List[float]]] = None ,**lowerCamelCase__ : List[str] ,):
        super().__init__(**lowerCamelCase__ )
        UpperCAmelCase__ = size if size is not None else {'shortest_edge': 256}
        UpperCAmelCase__ = get_size_dict(lowerCamelCase__ ,default_to_square=lowerCamelCase__ )
        UpperCAmelCase__ = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        UpperCAmelCase__ = get_size_dict(lowerCamelCase__ ,param_name='crop_size' )
        UpperCAmelCase__ = do_resize
        UpperCAmelCase__ = size
        UpperCAmelCase__ = resample
        UpperCAmelCase__ = do_center_crop
        UpperCAmelCase__ = crop_size
        UpperCAmelCase__ = do_rescale
        UpperCAmelCase__ = rescale_factor
        UpperCAmelCase__ = do_normalize
        UpperCAmelCase__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        UpperCAmelCase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD

    # Resize so the shortest edge matches size['shortest_edge'].
    def __lowerCAmelCase ( self : int ,lowerCamelCase__ : np.ndarray ,lowerCamelCase__ : Dict[str, int] ,lowerCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC ,lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase__ : Tuple ,):
        UpperCAmelCase__ = get_size_dict(lowerCamelCase__ ,default_to_square=lowerCamelCase__ )
        if "shortest_edge" not in size:
            raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
        UpperCAmelCase__ = get_resize_output_image_size(lowerCamelCase__ ,size=size['shortest_edge'] ,default_to_square=lowerCamelCase__ )
        return resize(lowerCamelCase__ ,size=lowerCamelCase__ ,resample=lowerCamelCase__ ,data_format=lowerCamelCase__ ,**lowerCamelCase__ )

    # Center-crop to size['height'] x size['width'].
    def __lowerCAmelCase ( self : str ,lowerCamelCase__ : np.ndarray ,lowerCamelCase__ : Dict[str, int] ,lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase__ : Tuple ,):
        UpperCAmelCase__ = get_size_dict(lowerCamelCase__ )
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}''' )
        return center_crop(lowerCamelCase__ ,size=(size['height'], size['width']) ,data_format=lowerCamelCase__ ,**lowerCamelCase__ )

    # Multiply pixel values by `scale`.
    def __lowerCAmelCase ( self : Optional[int] ,lowerCamelCase__ : np.ndarray ,lowerCamelCase__ : float ,lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase__ : int ):
        return rescale(lowerCamelCase__ ,scale=lowerCamelCase__ ,data_format=lowerCamelCase__ ,**lowerCamelCase__ )

    # Normalize with per-channel mean/std.
    def __lowerCAmelCase ( self : str ,lowerCamelCase__ : np.ndarray ,lowerCamelCase__ : Union[float, List[float]] ,lowerCamelCase__ : Union[float, List[float]] ,lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase__ : List[Any] ,):
        return normalize(lowerCamelCase__ ,mean=lowerCamelCase__ ,std=lowerCamelCase__ ,data_format=lowerCamelCase__ ,**lowerCamelCase__ )

    # Full preprocessing pipeline: resize -> center-crop -> rescale -> normalize.
    def __lowerCAmelCase ( self : List[str] ,lowerCamelCase__ : ImageInput ,lowerCamelCase__ : Optional[bool] = None ,lowerCamelCase__ : Dict[str, int] = None ,lowerCamelCase__ : PILImageResampling = None ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : Dict[str, int] = None ,lowerCamelCase__ : Optional[bool] = None ,lowerCamelCase__ : Optional[float] = None ,lowerCamelCase__ : Optional[bool] = None ,lowerCamelCase__ : Optional[Union[float, List[float]]] = None ,lowerCamelCase__ : Optional[Union[float, List[float]]] = None ,lowerCamelCase__ : Optional[Union[str, TensorType]] = None ,lowerCamelCase__ : Union[str, ChannelDimension] = ChannelDimension.FIRST ,**lowerCamelCase__ : Union[str, Any] ,):
        UpperCAmelCase__ = do_resize if do_resize is not None else self.do_resize
        UpperCAmelCase__ = size if size is not None else self.size
        UpperCAmelCase__ = get_size_dict(lowerCamelCase__ ,default_to_square=lowerCamelCase__ )
        UpperCAmelCase__ = resample if resample is not None else self.resample
        UpperCAmelCase__ = do_center_crop if do_center_crop is not None else self.do_center_crop
        UpperCAmelCase__ = crop_size if crop_size is not None else self.crop_size
        UpperCAmelCase__ = get_size_dict(lowerCamelCase__ ,param_name='crop_size' )
        UpperCAmelCase__ = do_rescale if do_rescale is not None else self.do_rescale
        UpperCAmelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
        UpperCAmelCase__ = do_normalize if do_normalize is not None else self.do_normalize
        UpperCAmelCase__ = image_mean if image_mean is not None else self.image_mean
        UpperCAmelCase__ = image_std if image_std is not None else self.image_std
        UpperCAmelCase__ = make_list_of_images(lowerCamelCase__ )
        if not valid_images(lowerCamelCase__ ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.' )
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.' )
        # All transformations expect numpy arrays.
        UpperCAmelCase__ = [to_numpy_array(lowerCamelCase__ ) for image in images]
        if do_resize:
            UpperCAmelCase__ = [self.resize(image=lowerCamelCase__ ,size=lowerCamelCase__ ,resample=lowerCamelCase__ ) for image in images]
        if do_center_crop:
            UpperCAmelCase__ = [self.center_crop(image=lowerCamelCase__ ,size=lowerCamelCase__ ) for image in images]
        if do_rescale:
            UpperCAmelCase__ = [self.rescale(image=lowerCamelCase__ ,scale=lowerCamelCase__ ) for image in images]
        if do_normalize:
            UpperCAmelCase__ = [self.normalize(image=lowerCamelCase__ ,mean=lowerCamelCase__ ,std=lowerCamelCase__ ) for image in images]
        UpperCAmelCase__ = [to_channel_dimension_format(lowerCamelCase__ ,lowerCamelCase__ ) for image in images]
        UpperCAmelCase__ = {'pixel_values': images}
        return BatchFeature(data=lowerCamelCase__ ,tensor_type=lowerCamelCase__ )

    # Upscale logits to target sizes and take per-pixel argmax to build
    # semantic segmentation maps.
    def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : Dict ,lowerCamelCase__ : List[Tuple] = None ):
        UpperCAmelCase__ = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(lowerCamelCase__ ) != len(lowerCamelCase__ ):
                raise ValueError(
                    'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
            if is_torch_tensor(lowerCamelCase__ ):
                UpperCAmelCase__ = target_sizes.numpy()
            UpperCAmelCase__ = []
            for idx in range(len(lowerCamelCase__ ) ):
                UpperCAmelCase__ = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode='bilinear' ,align_corners=lowerCamelCase__ )
                UpperCAmelCase__ = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(lowerCamelCase__ )
        else:
            UpperCAmelCase__ = logits.argmax(dim=1 )
            UpperCAmelCase__ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
| 98 | """simple docstring"""
def a_(a, b):
    """Return the bitwise AND of two non-negative integers as a binary string.

    Both operands are rendered in binary, zero-padded to the same width, and
    ANDed character by character; the result keeps the ``0b`` prefix.

    :raises ValueError: if either input is negative.

    (Fix: the obfuscated original declared both parameters with the same name —
    a SyntaxError — while the body read ``a``/``b`` and the derived locals.)
    """
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive')
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == '1' and char_b == '1'))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 98 | 1 |
"""simple docstring"""
def a_(arr):
    """Return all permutations of ``arr`` via Heap's algorithm (iterative form).

    The list is permuted in place; each arrangement is recorded as a tuple.

    :param arr: list of elements (mutated during generation)
    :return: list of tuples covering every permutation

    (Fix: the obfuscated original discarded ``res``/``c``/``i`` and both swap
    targets into throwaway names, so no swap or bookkeeping ever happened.)
    """
    if len(arr) <= 1:
        return [tuple(arr)]
    res = []

    def generate(n, arr):
        # c[i] counts how many swaps have been performed at depth i.
        c = [0] * n
        res.append(tuple(arr))
        i = 0
        while i < n:
            if c[i] < i:
                # Even depth swaps with position 0, odd depth with c[i].
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res
if __name__ == "__main__":
    # Fix: the original bound the parsed input to a throwaway name and called
    # the nonexistent ``heaps``; the generator above is defined as ``a_``.
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
    print(a_(arr))
| 98 | """simple docstring"""
import requests
from bsa import BeautifulSoup
def a_(base_url, params):
    """Scrape the "cited by" text for a publication from a Google Scholar
    lookup page.

    :param base_url: the scholar lookup endpoint
    :param params: query parameters identifying the publication
    :return: text of the third anchor in the result footer (the citation count)

    (Fix: duplicate parameter names in the obfuscated original made this a
    SyntaxError, and ``soup``/``div``/``anchors`` were never bound. Relies on
    the third-party ``requests``/BeautifulSoup imports at the top of the file.)
    """
    soup = BeautifulSoup(requests.get(base_url, params=params).content, 'html.parser')
    div = soup.find('div', attrs={'class': 'gs_ri'})
    anchors = div.find('div', attrs={'class': 'gs_fl'}).find_all('a')
    return anchors[2].get_text()
if __name__ == "__main__":
    # Fix: the original bound the query parameters to a throwaway name and
    # called the nonexistent ``get_citation``; the scraper above is ``a_``.
    params = {
        'title': (
            'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
            'capacitance, volumetric capacitance, and energy density'
        ),
        'journal': 'Chem. Mater.',
        'volume': 30,
        'pages': '3979-3990',
        'year': 2_018,
        'hl': 'en',
    }
    print(a_('https://scholar.google.com/scholar_lookup', params=params))
| 98 | 1 |
"""simple docstring"""
import functools
from typing import Any
def a_ ( string , words ):
    """Return True if *string* can be split into a sequence of words from *words*.

    Classic "word break": build a trie of the vocabulary, then run a cached
    top-down dynamic-programming scan over the string.

    Raises:
        ValueError: if *string* is not a non-empty ``str`` or *words* is not a
            list of non-empty strings.

    Fixes: the original declared both parameters with the same name (a
    SyntaxError) and obfuscated the trie-lookup arguments.
    """
    # Validation
    if not isinstance(string , str ) or len(string ) == 0:
        raise ValueError('the string should be not empty string' )
    if not isinstance(words , list ) or not all(
        isinstance(item , str ) and len(item ) > 0 for item in words ):
        raise ValueError('the words should be a list of non-empty strings' )
    # Build trie: nodes map characters to children; WORD_KEEPER marks nodes
    # where a complete vocabulary word ends.
    trie = {}
    word_keeper_key = 'WORD_KEEPER'
    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True
    len_string = len(string )
    # Dynamic programming method
    @functools.cache
    def is_breakable(index ) -> bool:
        # True when string[index:] can be segmented into vocabulary words.
        if index == len_string:
            return True
        trie_node = trie
        for i in range(index , len_string ):
            trie_node = trie_node.get(string[i] , None )
            if trie_node is None:
                return False
            if trie_node.get(word_keeper_key , False ) and is_breakable(i + 1 ):
                return True
        return False
    return is_breakable(0 )
if __name__ == "__main__":
    # Run the docstring examples (if any) as a smoke test.
    import doctest
    doctest.testmod()
| 98 | """simple docstring"""
def a_ ( lowerCamelCase ):
    """Return True when the decimal rendering of the value is a palindrome."""
    rendered = str(lowerCamelCase )
    return rendered == rendered[::-1]
def a_ ( lowerCamelCase ):
    """Return the value plus the number formed by reversing its decimal digits."""
    forward = str(lowerCamelCase )
    return int(forward ) + int(forward[::-1] )
def a_ ( lowerCamelCase = 1_0_0_0_0 ):
    """Project Euler 55: count Lychrel candidates below *lowerCamelCase*.

    A number is assumed Lychrel if 50 iterations of "add its own digit
    reversal" never produce a palindrome.

    Fixes: the original called ``sum_reverse`` and ``is_palindrome``, neither
    of which exists in this module (every helper is defined under the name
    ``a_`` and shadowed), and appended the wrong variable; the helpers are
    inlined here as local functions.
    """
    def is_palindrome(n ) -> bool:
        # Palindrome test on the decimal representation.
        return str(n ) == str(n )[::-1]

    def sum_reverse(n ) -> int:
        # One Lychrel iteration: add the digit-reversed value.
        return int(n ) + int(str(n )[::-1] )

    lychrel_nums = []
    for num in range(1 , lowerCamelCase ):
        iterations = 0
        current = num
        while iterations < 5_0:
            current = sum_reverse(current )
            iterations += 1
            if is_palindrome(current ):
                break
        else:
            # 50 iterations without reaching a palindrome: Lychrel candidate.
            lychrel_nums.append(num )
    return len(lychrel_nums )
if __name__ == "__main__":
    # ``solution`` does not exist in this module; the entry point is ``a_``.
    print(F"""{a_() = }""")
| 98 | 1 |
"""simple docstring"""
import math
import tensorflow as tf
from packaging import version
def a_ ( lowerCamelCase ):
    """Exact Gaussian Error Linear Unit: x * Phi(x), computed via erf.

    Fixes: both intermediates were assigned to a throwaway name while the
    body read ``x`` and ``cdf`` (NameError at call time).
    """
    x = tf.convert_to_tensor(lowerCamelCase )
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
    return x * cdf
def a_ ( lowerCamelCase ):
    """Tanh-based GELU approximation (Hendrycks & Gimpel, arXiv:1606.08415).

    Fixes: the intermediates ``x``/``pi``/``coeff``/``cdf`` read by the body
    were all assigned to a throwaway name (NameError at call time).
    """
    x = tf.convert_to_tensor(lowerCamelCase )
    pi = tf.cast(math.pi , x.dtype )
    coeff = tf.cast(0.044715 , x.dtype )
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(x , 3 )) ))
    return x * cdf
def a_ ( lowerCamelCase ):
    """Mish activation: x * tanh(softplus(x)).

    Fixes: the converted tensor was assigned to a throwaway name while the
    body read ``x`` (NameError at call time).
    """
    x = tf.convert_to_tensor(lowerCamelCase )
    return x * tf.tanh(tf.math.softplus(x ) )
def a_ ( lowerCamelCase ):
    """Fast GELU approximation: 0.5*x*(1 + tanh(x*c2*(1 + c1*x*x))).

    Fixes: both coefficients were obfuscated into the same name ``coeffa``
    and assigned to a throwaway variable; the standard constants are
    0.044715 (inner) and 0.7978845608 ≈ sqrt(2/pi) (outer) — confirm against
    upstream ``gelu_fast``.
    """
    x = tf.convert_to_tensor(lowerCamelCase )
    coeff_inner = tf.cast(0.044715 , x.dtype )
    coeff_outer = tf.cast(0.7978845608 , x.dtype )  # sqrt(2/pi)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff_outer * (1.0 + coeff_inner * x * x) ))
def a_ ( lowerCamelCase ):
    """Quick GELU: x * sigmoid(1.702 * x).

    Fixes: ``x`` and ``coeff`` read by the return line were assigned to a
    throwaway name (NameError at call time).
    """
    x = tf.convert_to_tensor(lowerCamelCase )
    coeff = tf.cast(1.702 , x.dtype )
    return x * tf.math.sigmoid(coeff * x )
def a_ ( lowerCamelCase ):
    """GELU clipped to [-10, 10] (the "gelu_10" variant).

    Fixes: the body called ``_gelu``, which is not defined in this module
    (every helper here is named ``a_`` and shadowed), so the exact-GELU
    formula is inlined before clipping.
    """
    x = tf.convert_to_tensor(lowerCamelCase )
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
    return tf.clip_by_value(x * cdf , -1_0 , 1_0 )
def a_ ( lowerCamelCase , axis=-1 ):
    """Gated Linear Unit: split the input in two along *axis*, return a * sigmoid(b).

    Fixes: the original declared two parameters with the same name (a
    SyntaxError), discarded the split halves into a throwaway name, and
    applied the sigmoid to the whole input instead of the gate half.
    """
    a, b = tf.split(lowerCamelCase , 2 , axis=axis )
    return a * tf.math.sigmoid(b )
# NOTE(review): this whole section was machine-obfuscated and no longer runs:
#  - ``approximate_gelu_wrap`` (line below) is never defined; the wrapper def
#    is itself named ``a_``.
#  - ``_gelu`` / ``_gelu_new`` do not exist; the helpers above are all ``a_``.
#  - every binding lands on the throwaway name ``lowerCAmelCase__`` instead of
#    ``gelu`` / ``gelu_new`` / the ``ACT2FN`` mapping name, so all dict values
#    (``gelu``, ``gelu_aa``, ``gelu_fast``, ``gelu_new``, ``glu``, ``mish``,
#    ``quick_gelu``) raise NameError when this module is imported.
# Restoring it requires renaming the module-level bindings consistently;
# left byte-identical here and only documented.
if version.parse(tf.version.VERSION) >= version.parse('2.4'):
    # TF >= 2.4 ships an ``approximate=`` kwarg on the built-in gelu; the
    # wrapper was meant to pin approximate=True.
    def a_ ( lowerCamelCase ):
        return tf.keras.activations.gelu(lowerCamelCase , approximate=lowerCamelCase )
    lowerCAmelCase__ : str = tf.keras.activations.gelu
    lowerCAmelCase__ : Tuple = approximate_gelu_wrap
else:
    lowerCAmelCase__ : str = _gelu
    lowerCAmelCase__ : Optional[Any] = _gelu_new
# String-name -> activation-callable registry (upstream name: ACT2FN).
lowerCAmelCase__ : Any = {
    'gelu': gelu,
    'gelu_10': gelu_aa,
    'gelu_fast': gelu_fast,
    'gelu_new': gelu_new,
    'glu': glu,
    'mish': mish,
    'quick_gelu': quick_gelu,
    'relu': tf.keras.activations.relu,
    'sigmoid': tf.keras.activations.sigmoid,
    'silu': tf.keras.activations.swish,
    'swish': tf.keras.activations.swish,
    'tanh': tf.keras.activations.tanh,
}
def a_ ( lowerCamelCase ):
    """Look up an activation callable by its string name.

    NOTE(review): broken by obfuscation — the body reads ``activation_string``
    but the parameter is ``lowerCamelCase``, and ``ACTaFN`` is never defined
    at module level (the mapping above is bound to ``lowerCAmelCase__``), so
    every call raises NameError. Left byte-identical; needs the module-level
    names restored.
    """
    if activation_string in ACTaFN:
        return ACTaFN[activation_string]
    else:
        raise KeyError(f'''function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}''' )
| 98 | """simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import skeleton for the (deprecated) M-CTC-T model, following the
# Hugging Face ``_LazyModule`` pattern.
#
# Fixes over the obfuscated original:
#  - the structure dict was bound to a throwaway name while the final
#    ``_LazyModule(...)`` call read ``_import_structure`` (NameError);
#  - the torch-only symbol list clobbered that name instead of being
#    registered under the ``'modeling_mctct'`` key;
#  - the constructed ``_LazyModule`` was assigned to a throwaway name instead
#    of being installed as this module in ``sys.modules``.
_import_structure = {
    'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
    'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
    'processing_mctct': ['MCTCTProcessor'],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch-backed symbols are only registered when torch is installed.
    _import_structure['modeling_mctct'] = [
        'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MCTCTForCTC',
        'MCTCTModel',
        'MCTCTPreTrainedModel',
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
    import sys
    # At runtime, replace this module with a lazy proxy that imports
    # submodules on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98 | 1 |
"""simple docstring"""
import requests
# Personal Giphy API key; module-level so it can be patched in tests.
giphy_api_key = 'YOUR API KEY'
def a_ ( query , api_key = giphy_api_key ):
    """Return the URLs of GIFs matching *query* via the Giphy search API.

    Fixes: both parameters were named identically (a SyntaxError) and the
    default read ``giphy_api_key``, which was bound to a throwaway name.
    """
    formatted_query = '+'.join(query.split() )
    url = f'''https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}'''
    gifs = requests.get(url ).json()['data']
    return [gif["url"] for gif in gifs]
if __name__ == "__main__":
    # The search helper in this module is ``a_`` (``get_gifs`` is undefined).
    print('\n'.join(a_('space ship')))
| 98 | """simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class snake_case :
        """Fallback stand-in used when PIL is unavailable: a no-op static
        method so test collection does not fail on the missing import.

        NOTE(review): upstream this stub is named ``Image`` with a static
        ``open`` — both names were obfuscated here, so code referencing
        ``Image.open`` will not find this stub; confirm against upstream.
        """
        @staticmethod
        def __lowerCAmelCase ( *lowerCamelCase__ : List[Any] ,**lowerCamelCase__ : Union[str, Any] ):
            # Deliberately does nothing.
            pass
def a_ ( lowerCamelCase ):
    """Return a short (10 hex chars) MD5 fingerprint of an image's raw bytes.

    The argument only needs a ``tobytes()`` method (PIL images and numpy
    arrays both provide one).

    Fixes: ``hashlib.mda`` does not exist (intended digest: ``md5``) and the
    body read an undefined name ``image`` instead of the parameter.
    """
    m = hashlib.md5(lowerCamelCase.tobytes() )
    return m.hexdigest()[:1_0]
def a_ ( lowerCamelCase ):
    """Summarize a mask image as ``{"hash": <10-char md5>, "shape": <np shape>}``.

    Fixes: the body called ``hashimage``, which does not exist under that
    name in this module (the hashing helper above is also named ``a_`` and
    shadowed), so the 10-character MD5 fingerprint is computed inline.
    """
    npimg = np.array(lowerCamelCase )
    shape = npimg.shape
    return {"hash": hashlib.md5(lowerCamelCase.tobytes() ).hexdigest()[:1_0], "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class snake_case ( unittest.TestCase ):
    """Pipeline tests for SAM-style automatic mask generation.

    NOTE(review): reconstructed from an obfuscated body — every assignment
    target had been renamed to a throwaway name while the use sites still
    read ``image_segmenter``/``outputs``/``new_outupt``, and each ``def``
    carried duplicate parameter names (a SyntaxError). Method and attribute
    names were restored from the use sites and the upstream transformers
    test suite — confirm against upstream.
    """

    # Mappings consumed by the shared pipeline-test mixin machinery.
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )

    @staticmethod
    def _mask_to_test_readable(mask ):
        # Local copy of the module-level mask summarizer (unreachable at
        # module level because every top-level helper in this file is ``a_``).
        npimg = np.array(mask )
        return {"hash": hashlib.md5(mask.tobytes() ).hexdigest()[:1_0], "shape": npimg.shape}

    def get_test_pipeline( self , model , tokenizer , processor ):
        """Build the pipeline under test plus a pair of example inputs."""
        image_segmenter = MaskGenerationPipeline(model=model , image_processor=processor )
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test( self , mask_generator , examples ):
        # Generic pipeline checks are skipped for this task.
        pass

    @require_tf
    @unittest.skip('Image segmentation not implemented in TF' )
    def test_small_model_tf( self ):
        pass

    @slow
    @require_torch
    def test_small_model_pt( self ):
        image_segmenter = pipeline('mask-generation' , model='facebook/sam-vit-huge' )
        outputs = image_segmenter('http://images.cocodataset.org/val2017/000000039769.jpg' , points_per_batch=256 )
        # Shortening by hashing
        new_outupt = []
        for i, o in enumerate(outputs['masks'] ):
            new_outupt += [{"mask": self._mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(new_outupt , decimals=4 ) , [
                {'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_4_4_4},
                {'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_2_1},
                {'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_1_6_7},
                {'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_1_3_2},
                {'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_0_5_3},
                {'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.9_9_6_7},
                {'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.9_9_3},
                {'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.9_9_0_9},
                {'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.9_8_7_9},
                {'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.9_8_3_4},
                {'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.9_7_1_6},
                {'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.9_6_1_2},
                {'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.9_5_9_9},
                {'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.9_5_5_2},
                {'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.9_5_3_2},
                {'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.9_5_1_6},
                {'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.9_4_9_9},
                {'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.9_4_8_3},
                {'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.9_4_6_4},
                {'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.9_4_3},
                {'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.9_4_3},
                {'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.9_4_0_8},
                {'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.9_3_3_5},
                {'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.9_3_2_6},
                {'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.9_2_6_2},
                {'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.8_9_9_9},
                {'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.8_9_8_6},
                {'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.8_9_8_4},
                {'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.8_8_7_3},
                {'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.8_8_7_1}
            ] , )
        # fmt: on

    @require_torch
    @slow
    def test_threshold( self ):
        model_id = 'facebook/sam-vit-huge'
        image_segmenter = pipeline('mask-generation' , model=model_id )
        outputs = image_segmenter(
            'http://images.cocodataset.org/val2017/000000039769.jpg' , pred_iou_thresh=1 , points_per_batch=256 )
        # Shortening by hashing
        new_outupt = []
        for i, o in enumerate(outputs['masks'] ):
            new_outupt += [{"mask": self._mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(new_outupt , decimals=4 ) , [
                {'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_4_4_4},
                {'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_2_1_0},
                {'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_1_6_7},
                {'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_1_3_2},
                {'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_0_5_3},
            ] , )
| 98 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case :
    """Builds tiny ConvNext configs and dummy inputs for the model tests.

    NOTE(review): reconstructed from an obfuscated body — ``__init__`` was
    declared with many parameters of the same name (a SyntaxError) and every
    assignment landed on a throwaway name while the methods read
    ``self.batch_size`` etc. Upstream this class is ``ConvNextModelTester``;
    the sibling test classes below instantiate it under that (missing) name.
    """

    def __init__( self , parent , batch_size=13 , image_size=32 , num_channels=3 , num_stages=4 , hidden_sizes=[10, 20, 30, 40] , depths=[2, 2, 3, 2] , is_training=True , use_labels=True , intermediate_size=37 , hidden_act="gelu" , num_labels=10 , initializer_range=0.0_2 , out_features=["stage2", "stage3", "stage4"] , out_indices=[2, 3, 4] , scope=None , ):
        # The mutable list defaults are only read, never mutated.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels

    def get_config( self ):
        # NOTE(review): the original ``is_decoder=`` value was obfuscated;
        # False assumed — confirm against upstream.
        return ConvNextConfig(
            num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=False , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )

    def create_and_check_model( self , config , pixel_values , labels ):
        model = ConvNextModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )

    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        model = ConvNextForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_backbone( self , config , pixel_values , labels ):
        model = ConvNextBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )

    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class snake_case ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Standard model/pipeline test suite for ConvNext.

    NOTE(review): reconstructed from an obfuscated body — the base classes
    were ``__UpperCAmelCase`` (undefined; the file imports ModelTesterMixin
    and PipelineTesterMixin), every method was named ``__lowerCAmelCase`` (so
    later defs shadowed earlier ones), and assignments landed on throwaway
    names. Attribute and ``test_*`` names were restored from the use sites
    and upstream — confirm against upstream.
    """

    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp( self ):
        self.model_tester = ConvNextModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ConvNextConfig , has_text_modality=False , hidden_size=37 )

    def test_config( self ):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties( self ):
        return

    @unittest.skip(reason='ConvNext does not use inputs_embeds' )
    def test_inputs_embeds( self ):
        pass

    @unittest.skip(reason='ConvNext does not support input and output embeddings' )
    def test_model_common_attributes( self ):
        pass

    @unittest.skip(reason='ConvNext does not use feedforward chunking' )
    def test_feed_forward_chunking( self ):
        pass

    def test_forward_signature( self ):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )

    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_backbone( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs )

    def test_hidden_states_output( self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )

    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
self.assertIsNotNone(lowerCamelCase__ )
def a_ ( ):
    """Load the repo's standard COCO test fixture image (two cats on a couch).

    Fixes: the opened image was assigned to a throwaway name while the
    ``return`` read an undefined ``image``.
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class snake_case ( unittest.TestCase ):
    """Integration test: run the tiny-224 ConvNext checkpoint on the fixture image.

    NOTE(review): reconstructed from an obfuscated body — every assignment
    landed on a throwaway name while the code read ``inputs``/``outputs``
    etc.; property and test names restored from upstream — confirm.
    """

    @cached_property
    def default_image_processor( self ):
        return AutoImageProcessor.from_pretrained('facebook/convnext-tiny-224' ) if is_vision_available() else None

    @slow
    def test_inference_image_classification_head( self ):
        model = ConvNextForImageClassification.from_pretrained('facebook/convnext-tiny-224' ).to(torch_device )
        image_processor = self.default_image_processor
        # The fixture loader in this module is named ``a_`` (the original call
        # referenced an undefined ``prepare_img``).
        image = a_()
        inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.0_2_6_0, -0.4_7_3_9, 0.1_9_1_1] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
@require_torch
class snake_case ( unittest.TestCase , BackboneTesterMixin ):
    """Backbone-API conformance tests for ConvNextBackbone.

    NOTE(review): the second base class was ``__UpperCAmelCase`` (undefined);
    the file imports ``BackboneTesterMixin``, which is the upstream base —
    confirm. Attribute names restored from the mixin's expected interface.
    """

    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig
    has_attentions = False

    def setUp( self ):
        # NOTE(review): ``ConvNextModelTester`` is not defined under that name
        # in this obfuscated file (the tester class above is ``snake_case``).
        self.model_tester = ConvNextModelTester(self )
| 98 | """simple docstring"""
import functools
def a_ ( worda , wordb ):
    """Return the Levenshtein (edit) distance between *worda* and *wordb*.

    Allowed operations: insert, delete, substitute, each at cost 1.
    Top-down dynamic programming over (index into worda, index into wordb),
    memoized with ``functools.cache``.

    Fixes: the original declared both parameters with the same name (a
    SyntaxError) and read undefined ``worda``/``wordb``/``len_worda``.
    """
    len_worda = len(worda )
    len_wordb = len(wordb )

    @functools.cache
    def min_distance(indexa , indexb ) -> int:
        # if first word index is overflow - delete all from the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if second word index is overflow - delete all from the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb] )  # current letters not identical
        return min(
            1 + min_distance(indexa + 1 , indexb ) , 1 + min_distance(indexa , indexb + 1 ) , diff + min_distance(indexa + 1 , indexb + 1 ) , )

    return min_distance(0 , 0 )
if __name__ == "__main__":
    # Run the docstring examples (if any) as a smoke test.
    import doctest
    doctest.testmod()
| 98 | 1 |
"""simple docstring"""
from itertools import product
def a_ ( sides_number , dice_number ):
    """Frequency table of totals for *dice_number* fair dice with *sides_number* faces.

    Returns a list ``freq`` where ``freq[t]`` counts the ordered rolls whose
    faces sum to ``t`` (indices run 0..sides*dice; totals below *dice_number*
    stay 0).

    Fixes: the original declared both parameters with the same name (a
    SyntaxError) and lost the minimum-face bound (the ``range`` started at an
    obfuscated name instead of 1).
    """
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)
    min_face_number = 1
    face_numbers = range(min_face_number , max_face_number + 1 )
    for dice_numbers in product(face_numbers , repeat=dice_number ):
        total = sum(dice_numbers )
        totals_frequencies[total] += 1
    return totals_frequencies
def a_ ( ):
    """Project Euler 205: probability that Peter (nine 4-sided dice) beats
    Colin (six 6-sided dice), rounded to 7 decimal places.

    Fixes: the original called ``total_frequency_distribution`` (shadowed —
    every helper in this module is named ``a_``) and bound all loop limits to
    a throwaway name; the distribution helper is defined locally and the
    limits are restored.
    """
    def total_frequency_distribution(sides_number , dice_number ):
        # Count ordered rolls for each achievable total.
        face_numbers = range(1 , sides_number + 1 )
        totals_frequencies = [0] * (sides_number * dice_number + 1)
        for dice_numbers in product(face_numbers , repeat=dice_number ):
            totals_frequencies[sum(dice_numbers )] += 1
        return totals_frequencies

    peter_totals_frequencies = total_frequency_distribution(sides_number=4 , dice_number=9 )
    colin_totals_frequencies = total_frequency_distribution(sides_number=6 , dice_number=6 )
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total , max_peter_total + 1 ):
        # Peter wins exactly when Colin's total is strictly smaller.
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total] )
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability , ndigits=7 )
    return rounded_peter_win_probability
if __name__ == "__main__":
    # ``solution`` does not exist in this module; the entry point is ``a_``.
    print(F"""{a_() = }""")
| 98 | """simple docstring"""
from PIL import Image
def a_ ( img , level ):
    """Return *img* with its contrast adjusted by *level*.

    Uses the standard linear contrast mapping: each channel value ``c`` maps
    to ``128 + factor * (c - 128)``, where the factor grows with *level*.

    Fixes: the original declared both parameters with the same name (a
    SyntaxError) and bound the factor to a throwaway name while ``contrast``
    read the undefined ``factor``.
    """
    factor = (2_5_9 * (level + 2_5_5)) / (2_5_5 * (2_5_9 - level))
    def contrast(c ) -> int:
        # Stretch/compress around the midpoint 128.
        return int(1_2_8 + factor * (c - 1_2_8) )
    # PIL's Image.point applies ``contrast`` to every pixel value.
    return img.point(contrast )
if __name__ == "__main__":
    # Load image
    with Image.open('image_data/lena.jpg') as img:
        # Change contrast to 170; the adjuster in this module is ``a_``
        # (``change_contrast`` is undefined), and the result must be bound to
        # the ``cont_img`` name the save call reads.
        cont_img = a_(img, 170)
        cont_img.save('image_data/lena_high_contrast.png', format='png')
| 98 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
# Path to the shared SentencePiece test fixture.
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right
# MBart language-code token ids. The integration tests below read
# ``EN_CODE``/``RO_CODE`` by name, but the original file bound both values to
# a throwaway name (NameError at test time).
EN_CODE = 250_004
RO_CODE = 250_020
@require_sentencepiece
@require_tokenizers
class snake_case ( TokenizerTesterMixin , unittest.TestCase ):
    """Tokenizer test suite for MBart (slow + fast implementations).

    NOTE(review): reconstructed from an obfuscated body — the mixin base was
    ``__UpperCAmelCase`` (undefined; the file imports TokenizerTesterMixin),
    every method was named ``__lowerCAmelCase`` (shadowing), methods carried
    duplicate parameter names (a SyntaxError), and all bindings landed on
    throwaway names. Names restored from use sites and upstream — confirm.
    """

    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp( self ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(get_tests_dir('fixtures/test_sentencepiece.model') , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )

    def test_full_tokenizer( self ):
        tokenizer = MBartTokenizer(get_tests_dir('fixtures/test_sentencepiece.model') , keep_accents=True )
        tokens = tokenizer.tokenize('This is a test' )
        self.assertListEqual(tokens , ['▁This', '▁is', '▁a', '▁t', 'est'] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        self.assertListEqual(
            tokens , [
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '9',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                's',
                'é',
                '.',
            ] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
            ] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '<unk>',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                's',
                '<unk>',
                '.',
            ] , )

    def test_save_pretrained( self ):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        # NOTE(review): upstream overrides the first list entry; the original
        # obfuscated line dropped the assignment target — confirm.
        self.tokenizers_list[0] = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(tmpdirname2 )
                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=True )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=False )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
@require_torch
@require_sentencepiece
@require_tokenizers
class snake_case ( unittest.TestCase ):
"""simple docstring"""
snake_case__ = "facebook/mbart-large-en-ro"
snake_case__ = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
snake_case__ = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
snake_case__ = [82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2, EN_CODE]
@classmethod
def __lowerCAmelCase ( cls : int ):
UpperCAmelCase__ = MBartTokenizer.from_pretrained(
cls.checkpoint_name ,src_lang='en_XX' ,tgt_lang='ro_RO' )
UpperCAmelCase__ = 1
return cls
def __lowerCAmelCase ( self : int ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] ,250_001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] ,250_004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] ,250_020 )
def __lowerCAmelCase ( self : Optional[Any] ):
UpperCAmelCase__ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens ,lowerCamelCase__ )
def __lowerCAmelCase ( self : str ):
self.assertIn(lowerCamelCase__ ,self.tokenizer.all_special_ids )
UpperCAmelCase__ = [RO_CODE, 884, 9_019, 96, 9, 916, 86_792, 36, 18_743, 15_596, 5, 2]
UpperCAmelCase__ = self.tokenizer.decode(lowerCamelCase__ ,skip_special_tokens=lowerCamelCase__ )
UpperCAmelCase__ = self.tokenizer.decode(generated_ids[1:] ,skip_special_tokens=lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ )
self.assertNotIn(self.tokenizer.eos_token ,lowerCamelCase__ )
def __lowerCAmelCase ( self : List[str] ):
UpperCAmelCase__ = ['this is gunna be a long sentence ' * 20]
assert isinstance(src_text[0] ,lowerCamelCase__ )
UpperCAmelCase__ = 10
UpperCAmelCase__ = self.tokenizer(lowerCamelCase__ ,max_length=lowerCamelCase__ ,truncation=lowerCamelCase__ ).input_ids[0]
self.assertEqual(ids[-2] ,2 )
self.assertEqual(ids[-1] ,lowerCamelCase__ )
self.assertEqual(len(lowerCamelCase__ ) ,lowerCamelCase__ )
def __lowerCAmelCase ( self : Union[str, Any] ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) ,[250_026, 250_001] )
def __lowerCAmelCase ( self : str ):
UpperCAmelCase__ = tempfile.mkdtemp()
UpperCAmelCase__ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCamelCase__ )
UpperCAmelCase__ = MBartTokenizer.from_pretrained(lowerCamelCase__ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids ,lowerCamelCase__ )
@require_torch
def __lowerCAmelCase ( self : List[str] ):
UpperCAmelCase__ = self.tokenizer(self.src_text ,text_target=self.tgt_text ,padding=lowerCamelCase__ ,return_tensors='pt' )
UpperCAmelCase__ = shift_tokens_right(batch['labels'] ,self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def __lowerCAmelCase ( self : List[str] ):
UpperCAmelCase__ = self.tokenizer(
self.src_text ,text_target=self.tgt_text ,padding=lowerCamelCase__ ,truncation=lowerCamelCase__ ,max_length=len(self.expected_src_tokens ) ,return_tensors='pt' ,)
UpperCAmelCase__ = shift_tokens_right(batch['labels'] ,self.tokenizer.pad_token_id )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
self.assertEqual((2, 14) ,batch.input_ids.shape )
self.assertEqual((2, 14) ,batch.attention_mask.shape )
UpperCAmelCase__ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens ,lowerCamelCase__ )
self.assertEqual(2 ,batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens ,[] )
self.assertEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id, EN_CODE] )
def __lowerCAmelCase ( self : Tuple ):
UpperCAmelCase__ = self.tokenizer(self.src_text ,padding=lowerCamelCase__ ,truncation=lowerCamelCase__ ,max_length=3 ,return_tensors='pt' )
UpperCAmelCase__ = self.tokenizer(
text_target=self.tgt_text ,padding=lowerCamelCase__ ,truncation=lowerCamelCase__ ,max_length=10 ,return_tensors='pt' )
UpperCAmelCase__ = targets['input_ids']
UpperCAmelCase__ = shift_tokens_right(lowerCamelCase__ ,self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] ,3 )
self.assertEqual(batch.decoder_input_ids.shape[1] ,10 )
@require_torch
def __lowerCAmelCase ( self : Tuple ):
UpperCAmelCase__ = self.tokenizer._build_translation_inputs(
'A test' ,return_tensors='pt' ,src_lang='en_XX' ,tgt_lang='ar_AR' )
self.assertEqual(
nested_simplify(lowerCamelCase__ ) ,{
# A, test, EOS, en_XX
'input_ids': [[62, 3_034, 2, 250_004]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 250_001,
} ,)
| 98 | """simple docstring"""
import os
import sys
import unittest
# Make the repo's `utils` directory importable so `get_test_info` can be loaded.
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))

import get_test_info  # noqa: E402
from get_test_info import (  # noqa: E402
    get_model_to_test_mapping,
    get_model_to_tester_mapping,
    get_test_to_tester_mapping,
)

# Test files whose test <-> tester mappings are checked by the test case below.
BERT_TEST_FILE = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py')
BLIP_TEST_FILE = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py')
class snake_case ( unittest.TestCase ):
    """Checks the `get_test_info` mappings for the BERT and BLIP test files.

    NOTE(review): the three methods originally shared one (mangled) name, so
    two of them were silently shadowed; distinct `test_*` names are restored
    so unittest discovers and runs all of them.
    """

    def test_test_to_tester_mapping(self):
        """Test-class -> tester-class name mapping."""
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)
        expected_bert_mapping = {'BertModelTest': 'BertModelTester'}
        expected_blip_mapping = {
            'BlipModelTest': 'BlipModelTester',
            'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
            'BlipTextModelTest': 'BlipTextModelTester',
            'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
            'BlipVQAModelTest': 'BlipVQAModelTester',
            'BlipVisionModelTest': 'BlipVisionModelTester',
        }
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), expected_bert_mapping)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), expected_blip_mapping)

    def test_model_to_test_mapping(self):
        """Model-class -> test-class name mapping."""
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)
        expected_bert_mapping = {
            'BertForMaskedLM': ['BertModelTest'],
            'BertForMultipleChoice': ['BertModelTest'],
            'BertForNextSentencePrediction': ['BertModelTest'],
            'BertForPreTraining': ['BertModelTest'],
            'BertForQuestionAnswering': ['BertModelTest'],
            'BertForSequenceClassification': ['BertModelTest'],
            'BertForTokenClassification': ['BertModelTest'],
            'BertLMHeadModel': ['BertModelTest'],
            'BertModel': ['BertModelTest'],
        }
        expected_blip_mapping = {
            'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
            'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
            'BlipForQuestionAnswering': ['BlipVQAModelTest'],
            'BlipModel': ['BlipModelTest'],
            'BlipTextModel': ['BlipTextModelTest'],
            'BlipVisionModel': ['BlipVisionModelTest'],
        }
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), expected_bert_mapping)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), expected_blip_mapping)

    def test_model_to_tester_mapping(self):
        """Model-class -> tester-class name mapping."""
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)
        expected_bert_mapping = {
            'BertForMaskedLM': ['BertModelTester'],
            'BertForMultipleChoice': ['BertModelTester'],
            'BertForNextSentencePrediction': ['BertModelTester'],
            'BertForPreTraining': ['BertModelTester'],
            'BertForQuestionAnswering': ['BertModelTester'],
            'BertForSequenceClassification': ['BertModelTester'],
            'BertForTokenClassification': ['BertModelTester'],
            'BertLMHeadModel': ['BertModelTester'],
            'BertModel': ['BertModelTester'],
        }
        expected_blip_mapping = {
            'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
            'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
            'BlipForQuestionAnswering': ['BlipVQAModelTester'],
            'BlipModel': ['BlipModelTester'],
            'BlipTextModel': ['BlipTextModelTester'],
            'BlipVisionModel': ['BlipVisionModelTester'],
        }
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), expected_bert_mapping)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), expected_blip_mapping)
| 98 | 1 |
"""simple docstring"""
import datasets
from .evaluate import evaluate
lowerCAmelCase__ : Optional[Any] = '\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n'
lowerCAmelCase__ : List[str] = '\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n'
lowerCAmelCase__ : Any = '\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the SQuAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> squad_metric = datasets.load_metric("squad")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case ( datasets.Metric ):
    """SQuAD v1 metric: exact match and F1 computed by the official scoring script."""

    def __lowerCAmelCase ( self : str ):
        """Describe the metric's inputs/outputs (the `datasets.Metric` info hook)."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': {'id': datasets.Value('string' ), 'prediction_text': datasets.Value('string' )},
                    'references': {
                        'id': datasets.Value('string' ),
                        'answers': datasets.features.Sequence(
                            {
                                'text': datasets.Value('string' ),
                                'answer_start': datasets.Value('int32' ),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=['https://rajpurkar.github.io/SQuAD-explorer/'],
            reference_urls=['https://rajpurkar.github.io/SQuAD-explorer/'],
        )

    def __lowerCAmelCase ( self : int, predictions, references ):
        """Convert to the official SQuAD dataset format and delegate to `evaluate`.

        NOTE(review): both methods of this class carry the same (mangled) name,
        so this definition shadows the previous one; `datasets.Metric` normally
        expects `_info` and `_compute` — confirm before relying on this class.
        """
        pred_dict = {prediction['id']: prediction['prediction_text'] for prediction in predictions}
        dataset = [
            {
                'paragraphs': [
                    {
                        'qas': [
                            {
                                'answers': [{'text': answer_text} for answer_text in ref['answers']['text']],
                                'id': ref['id'],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 98 | """simple docstring"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

# Submodule name -> public names it exports; consumed by `_LazyModule` below.
_import_structure = {
    'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
    'tokenization_roc_bert': ['RoCBertTokenizer'],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # No fast tokenizer exists for RoCBert yet; nothing extra to register.
    pass

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # PyTorch modeling classes are only exported when torch is installed.
    _import_structure['modeling_roc_bert'] = [
        'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'RoCBertForCausalLM',
        'RoCBertForMaskedLM',
        'RoCBertForMultipleChoice',
        'RoCBertForPreTraining',
        'RoCBertForQuestionAnswering',
        'RoCBertForSequenceClassification',
        'RoCBertForTokenClassification',
        'RoCBertLayer',
        'RoCBertModel',
        'RoCBertPreTrainedModel',
        'load_tf_weights_in_roc_bert',
    ]

if TYPE_CHECKING:
    from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
    from .tokenization_roc_bert import RoCBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        # No fast tokenizer to re-export yet.
        pass

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roc_bert import (
            ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoCBertForCausalLM,
            RoCBertForMaskedLM,
            RoCBertForMultipleChoice,
            RoCBertForPreTraining,
            RoCBertForQuestionAnswering,
            RoCBertForSequenceClassification,
            RoCBertForTokenClassification,
            RoCBertLayer,
            RoCBertModel,
            RoCBertPreTrainedModel,
            load_tf_weights_in_roc_bert,
        )
else:
    import sys

    # Install the lazy module so heavy submodules are imported only on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98 | 1 |
"""simple docstring"""
from __future__ import annotations
def a_ ( nth_term , power ):
    """Return the first `nth_term` terms of the P-series 1 + 1/2^p + 1/3^p + ...

    Each term is a display string ('1', '1 / 4', ...). An empty string for
    `nth_term` yields [""] so the interactive prompt below degrades gracefully.
    """
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series = []
    for temp in range(int(nth_term)):
        # First term of the series is 1; every later term is 1 / (k ** power).
        series.append(f'''1 / {pow(temp + 1 , int(power) )}''' if series else '1')
    return series
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Read the series length and exponent, then print the series terms.
    nth_term = int(input('Enter the last number (nth term) of the P-Series'))
    power = int(input('Enter the power for P-Series'))
    print('Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p')
    print(a_(nth_term, power))
| 98 | """simple docstring"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)

# Submodule name -> public names it exports; consumed by `_LazyModule` below.
_import_structure = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # PyTorch modeling classes are only exported when torch is installed.
    _import_structure['modeling_vit_mae'] = [
        'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ViTMAEForPreTraining',
        'ViTMAELayer',
        'ViTMAEModel',
        'ViTMAEPreTrainedModel',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # TensorFlow modeling classes are only exported when TF is installed.
    _import_structure['modeling_tf_vit_mae'] = [
        'TFViTMAEForPreTraining',
        'TFViTMAEModel',
        'TFViTMAEPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_mae import (
            VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMAEForPreTraining,
            ViTMAELayer,
            ViTMAEModel,
            ViTMAEPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
    import sys

    # Install the lazy module so heavy submodules are imported only on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98 | 1 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def a_ ( num , den ):
    """Return True if num/den is a two-digit 'digit cancelling' fraction.

    e.g. 49/98: the numerator's last digit equals the denominator's first
    digit, and 'cancelling' that shared digit (4/8) keeps the same value.
    Short-circuit ordering guarantees no division by zero when den % 10 == 0.
    """
    return (
        num != den and num % 1_0 == den // 1_0 and (num // 1_0) / (den % 1_0) == num / den
    )
def a_ ( digit_len ):
    """Return all digit-cancelling fractions 'num/den' with two-digit denominators.

    `digit_len` bounds the numerator: num ranges over [11, 10**digit_len).
    """
    solutions = []
    den = 1_1
    last_digit = int('1' + '0' * digit_len)
    for num in range(den, last_digit):
        while den <= 9_9:
            if (num != den) and (num % 1_0 == den // 1_0) and (den % 1_0 != 0):
                # Inlined digit-cancelling test: removing the shared digit must
                # leave the fraction's value unchanged. (num != den and the
                # shared-digit condition are already guaranteed by the guard.)
                if (num // 1_0) / (den % 1_0) == num / den:
                    solutions.append(f'''{num}/{den}''')
            den += 1
        # Reset the denominator sweep for the next numerator.
        den = 1_0
    return solutions
def a_ ( max_digits = 2 ):
    """Project Euler 33: product of all digit-cancelling fractions, in lowest terms.

    Multiplies the reciprocals so `result` accumulates the denominator of the
    product when written in lowest terms, and returns it as an int.
    """
    result = 1.0
    for fraction in fraction_list(max_digits):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    # Bug fix: the original returned the *input* (int(max_digits)) instead of
    # the accumulated result.
    return int(result)
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined in this file — the solver above is
    # named `a_`; confirm the intended entry point before running this script.
    print(solution())
| 98 | """simple docstring"""
import os
import numpy
import onnx
def a_ ( proto_a , proto_b ):
    """Compare two ONNX TensorProtos for equality while ignoring their names.

    Temporarily blanks both `.name` fields, compares the protos, then restores
    the original names before returning the comparison result.
    """
    name_a = proto_a.name
    name_b = proto_b.name
    proto_a.name = ''
    proto_b.name = ''
    res = proto_a == proto_b
    # Restore the caller-visible names regardless of the comparison outcome.
    proto_a.name = name_a
    proto_b.name = name_b
    return res
def a_ ( node_proto , name , new_name ):
    """Replace every input called `name` with `new_name` on one ONNX node.

    Recurses into the subgraphs of If (both branches) and Loop (body) nodes.
    """
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            # repeated-field protos have no item assignment; emulate it with
            # insert + pop of the old entry.
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
def a_ ( graph_proto , name , new_name ):
    """Rename input `name` to `new_name` on every node of an ONNX graph."""
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def a_ ( model , model_without_ext , ind_to_replace ):
    """Drop duplicate initializers from `model_without_ext` and rewire their uses.

    `ind_to_replace` holds (duplicate_index, kept_index) pairs indexing into
    the models' initializer lists; every use of the duplicate's name is
    redirected to the kept initializer's name.
    """
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        # Both models must agree on the initializer ordering, and the kept
        # entry must come before the duplicate.
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def a_ ( onnx_file_path ):
    """Deduplicate identical initializer tensors in an ONNX model file.

    Loads the model at `onnx_file_path`, finds initializers that are equal
    (ignoring names), redirects every use of each duplicate to its first
    occurrence, removes the duplicates, and saves the result next to the
    input as 'optimized_<name>'. Returns the path of the optimized model.
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()         # initializer indices already matched as duplicates
    dup_map = {}            # kept name -> names of its duplicates (diagnostic)
    ind_to_replace = []     # (duplicate_index, kept_index) pairs
    total_reduced_size = 0  # estimated bytes saved, for reporting only
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                # Estimate the memory freed by dropping inits[j].
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:  # FLOAT
                    mem_size *= 4
                elif dtype == 6:  # INT32
                    mem_size *= 4
                elif dtype == 7 or dtype == 1_1:  # INT64 / DOUBLE
                    mem_size *= 8
                else:
                    print('unexpected data type: ', dtype)
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))
    print('total reduced size: ', total_reduced_size / 1_0_2_4 / 1_0_2_4 / 1_0_2_4, 'GB')
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)
    optimized_model_file_name = 'optimized_' + model_file_name
    new_model_path = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model_path)
    return new_model_path
| 98 | 1 |
"""simple docstring"""
import torch
from diffusers import StableDiffusionPipeline
# Path to a Stable Diffusion checkpoint fine-tuned e.g. with the DreamBooth script.
model_id = 'path-to-your-trained-model'
# Load in half precision and move to the GPU (torch.floataa was a mangled float16).
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to('cuda')
# Generate a single image for the prompt and save it to disk.
prompt = 'A photo of sks dog in a bucket'
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save('dog-bucket.png')
| 98 | """simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class snake_case ( PipelineTesterMixin , unittest.TestCase ):
    """Fast tests for `ShapEImgaImgPipeline`, built from tiny dummy components.

    Attribute/property names follow the `PipelineTesterMixin` contract
    (`pipeline_class`, `params`, `batch_params`, `get_dummy_components`, ...),
    which the method bodies already reference via `self`.
    """

    pipeline_class = ShapEImgaImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gradient_checkpointing = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_image_encoder(self):
        """Tiny CLIP vision tower used as the image encoder."""
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=64,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=1,
        )
        return CLIPVisionModel(config)

    @property
    def dummy_image_processor(self):
        """CLIP image processor matching the dummy encoder's preprocessing."""
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
            image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
            resample=3,
            size=224,
        )
        return image_processor

    @property
    def dummy_prior(self):
        """Tiny PriorTransformer mapping image embeddings to latents."""
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        return PriorTransformer(**model_kwargs)

    @property
    def dummy_renderer(self):
        """Tiny ShapE NeRF renderer."""
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        return ShapERenderer(**model_kwargs)

    def get_dummy_components(self):
        """All components needed to build the pipeline."""
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1_024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": self.dummy_prior,
            "image_encoder": self.dummy_image_encoder,
            "image_processor": self.dummy_image_processor,
            "renderer": self.dummy_renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic call kwargs for the pipeline on `device`."""
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            # mps does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e_img2img(self):
        """One CPU inference step reproduces the reference output slice."""
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        """`num_images_per_prompt` multiplies the number of generated images."""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase ):
    """Slow integration tests for the pretrained openai/shap-e-img2img pipeline."""

    def tearDown(self):
        # clean up the VRAM after each test (must be named `tearDown` so
        # unittest actually invokes it).
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        """Full 64-step generation matches the stored reference frames."""
        input_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png'
        )
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/shap_e/test_shap_e_img2img_out.npy'
        )
        pipe = ShapEImgaImgPipeline.from_pretrained('openai/shap-e-img2img')
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            input_image,
            generator=generator,
            guidance_scale=3.0,
            num_inference_steps=64,
            frame_size=64,
            output_type='np',
        ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
| 98 | 1 |
"""simple docstring"""
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def a_ ( emb_a , emb_b , eps=1e-1_2 ):
    """Cosine-similarity matrix between two batches of embeddings.

    Each row of `emb_a`/`emb_b` is L2-normalized (norms clipped to at least
    `eps` to avoid division by zero) before the matrix product, so entry
    (i, j) is the cosine similarity of emb_a[i] and emb_b[j].
    """
    # `min` is passed positionally for compatibility with both old (a_min) and
    # new (min) jnp.clip signatures.
    norm_emb_a = jnp.divide(emb_a.T, jnp.clip(jnp.linalg.norm(emb_a, axis=1), eps)).T
    norm_emb_b = jnp.divide(emb_b.T, jnp.clip(jnp.linalg.norm(emb_b, axis=1), eps)).T
    return jnp.matmul(norm_emb_a, norm_emb_b.T)
class snake_case ( nn.Module ):
    """Flax safety-checker head.

    Projects the CLIP pooled image embedding and flags images whose cosine
    similarity to any NSFW concept embedding exceeds the learned threshold.
    """

    # Linen dataclass fields (restored from mangled attribute declarations).
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        """Linen setup hook: declare submodules and learned threshold params."""
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)
        self.concept_embeds = self.param('concept_embeds', jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            'special_care_embeds', jax.nn.initializers.ones, (3, self.config.projection_dim)
        )
        self.concept_embeds_weights = self.param('concept_embeds_weights', jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param('special_care_embeds_weights', jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)
        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)
        # increase this value to create a stronger `nfsw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0
        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.0_1
        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)
        return has_nsfw_concepts
class snake_case ( FlaxPreTrainedModel ):
    """Flax pretrained-model wrapper around the safety-checker module.

    `config_class` / `main_input_name` / `module_class` are required by the
    `FlaxPreTrainedModel` machinery (the body already reads `self.module_class`).
    """

    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            # NHWC CLIP input: one 224x224 RGB image.
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.random.KeyArray, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)
        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {'params': params_rng, 'dropout': dropout_rng}
        random_params = self.module.init(rngs, clip_input)['params']
        return random_params

    def __call__(self, clip_input, params: dict = None):
        # Callers pass NCHW tensors; the module expects NHWC.
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))
        return self.module.apply(
            {'params': params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
| 98 | """simple docstring"""
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ : Optional[int] = logging.get_logger(__name__)
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = ["input_ids", "attention_mask"]
def __init__( self : Optional[int] ,lowerCamelCase__ : int="</s>" ,lowerCamelCase__ : str="<unk>" ,lowerCamelCase__ : Union[str, Any]="<pad>" ,lowerCamelCase__ : int=125 ,lowerCamelCase__ : str=None ,**lowerCamelCase__ : Union[str, Any] ,):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
UpperCAmelCase__ = [f'''<extra_id_{i}>''' for i in range(lowerCamelCase__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
UpperCAmelCase__ = len(set(filter(lambda lowerCamelCase__ : bool('extra_id' in str(lowerCamelCase__ ) ) ,lowerCamelCase__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
' provided to ByT5Tokenizer. In this case the additional_special_tokens must include the'
' extra_ids tokens' )
UpperCAmelCase__ = AddedToken(lowerCamelCase__ ,lstrip=lowerCamelCase__ ,rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) else pad_token
UpperCAmelCase__ = AddedToken(lowerCamelCase__ ,lstrip=lowerCamelCase__ ,rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) else eos_token
UpperCAmelCase__ = AddedToken(lowerCamelCase__ ,lstrip=lowerCamelCase__ ,rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) else unk_token
super().__init__(
eos_token=lowerCamelCase__ ,unk_token=lowerCamelCase__ ,pad_token=lowerCamelCase__ ,extra_ids=lowerCamelCase__ ,additional_special_tokens=lowerCamelCase__ ,**lowerCamelCase__ ,)
UpperCAmelCase__ = extra_ids
UpperCAmelCase__ = 2**8 # utf is 8 bits
# define special tokens dict
UpperCAmelCase__ = {
self.pad_token: 0,
self.eos_token: 1,
self.unk_token: 2,
}
UpperCAmelCase__ = len(self.special_tokens_encoder )
UpperCAmelCase__ = len(lowerCamelCase__ )
for i, token in enumerate(lowerCamelCase__ ):
UpperCAmelCase__ = self.vocab_size + i - n
UpperCAmelCase__ = {v: k for k, v in self.special_tokens_encoder.items()}
    @property
    def __lowerCAmelCase ( self : Union[str, Any] ):
        # Total vocabulary size: 256 byte tokens (_utf_vocab_size) plus the
        # pad/eos/unk specials plus the extra-id sentinel tokens.
        # NOTE(review): presumably this property was `vocab_size` originally —
        # `self.vocab_size` is read elsewhere in this class; confirm.
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
def __lowerCAmelCase(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
    """Return a mask with 1 at special-token positions and 0 elsewhere.

    NOTE(review): the original def repeated one obfuscated name for all three
    parameters and passed a duplicate keyword to ``super()`` — both syntax
    errors; names reconstructed from the usual tokenizer contract.
    """
    if already_has_special_tokens:
        return super().get_special_tokens_mask(
            token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
        )
    # normal case: some special tokens — a single trailing eos per sequence
    if token_ids_1 is None:
        return ([0] * len(token_ids_0)) + [1]
    return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
def __lowerCAmelCase(self, token_ids):
    """Append ``eos_token_id`` unless the sequence already ends with it (then warn).

    Fixed: the body read ``token_ids`` while the parameter carried a different
    (obfuscated) name, so every call raised NameError.
    """
    if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
        warnings.warn(
            f'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
            ' eos tokens being added.' )
        return token_ids
    else:
        return token_ids + [self.eos_token_id]
def __lowerCAmelCase(self, token_ids_0, token_ids_1=None):
    """Return all-zero token_type_ids, counting one eos per sequence.

    Fixed: duplicate parameter names (SyntaxError) and the second sequence's
    length being computed from the first; names reconstructed.
    """
    eos = [self.eos_token_id]
    if token_ids_1 is None:
        return len(token_ids_0 + eos) * [0]
    return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
def __lowerCAmelCase(self, token_ids_0, token_ids_1=None):
    """Concatenate one or two sequences, each terminated by a single eos.

    Fixed: duplicate parameter names (SyntaxError); names reconstructed.
    NOTE(review): calls ``self._add_eos_if_not_present`` — confirm that helper
    retains this name in the full class.
    """
    token_ids_0 = self._add_eos_if_not_present(token_ids_0)
    if token_ids_1 is None:
        return token_ids_0
    else:
        token_ids_1 = self._add_eos_if_not_present(token_ids_1)
        return token_ids_0 + token_ids_1
def __lowerCAmelCase(self, text):
    """Split *text* into one token per UTF-8 byte (each byte rendered via chr).

    Fixed: ``chr`` was applied to an unbound name instead of the loop variable
    ``i``, and the parameter name did not match the body's ``text``.
    """
    tokens = [chr(i) for i in text.encode('utf-8')]
    return tokens
def __lowerCAmelCase(self, token):
    """Map a token to its id: specials first, then added tokens, then raw byte ordinal.

    Fixed: the body read ``token`` while the parameter carried an obfuscated name.
    """
    if token in self.special_tokens_encoder:
        token_id = self.special_tokens_encoder[token]
    elif token in self.added_tokens_encoder:
        token_id = self.added_tokens_encoder[token]
    elif len(token) != 1:
        # multi-character strings that are not registered tokens are unknown
        token_id = self.unk_token_id
    else:
        # byte tokens are offset past the leading special-token ids
        token_id = ord(token) + self._num_special_tokens
    return token_id
def __lowerCAmelCase(self, index):
    """Inverse of token→id: specials via the decoder table, otherwise a raw byte char.

    Fixed: the body read ``index`` while the parameter carried an obfuscated name.
    """
    if index in self.special_tokens_decoder:
        token = self.special_tokens_decoder[index]
    else:
        token = chr(index - self._num_special_tokens)
    return token
def __lowerCAmelCase(self, tokens):
    """Decode a token list back to text by concatenating the underlying UTF-8 bytes.

    Fixed: accumulator/temporary names were collapsed by obfuscation, and the
    added-tokens branch looked the token up in ``special_tokens_decoder`` (which
    cannot contain it) instead of ``added_tokens_decoder``.
    """
    bstring = b''
    for token in tokens:
        if token in self.special_tokens_decoder:
            tok_string = self.special_tokens_decoder[token].encode('utf-8')
        elif token in self.added_tokens_decoder:
            tok_string = self.added_tokens_decoder[token].encode('utf-8')
        elif token in self.special_tokens_encoder:
            tok_string = token.encode('utf-8')
        elif token in self.added_tokens_encoder:
            tok_string = token.encode('utf-8')
        else:
            # plain byte token: its ordinal is the original byte value
            tok_string = bytes([ord(token)])
        bstring += tok_string
    string = bstring.decode('utf-8', errors='ignore')
    return string
def __lowerCAmelCase(self, save_directory, filename_prefix=None):
    """Byte-level tokenizer has no vocabulary file to save; return no file paths.

    Fixed: both parameters shared one obfuscated name (a SyntaxError).
    """
    return ()
| 98 | 1 |
"""simple docstring"""
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def a_(x):  # picklable for multiprocessing
    """Return the sum of *x*'s elements (module-level so it can be pickled).

    Fixed: the body read ``x`` while the parameter carried an obfuscated name.
    """
    return x.sum()
def a_(i):  # picklable for multiprocessing
    """Return *i* + 1 (module-level so it can be pickled).

    Fixed: the body read ``i`` while the parameter carried an obfuscated name.
    """
    return i + 1
@dataclass
# NOTE(review): the asdict test further below constructs this type via
# ``A(x=1, y='foobar')``, so the class was presumably named ``A`` with fields
# ``x`` and ``y`` before identifiers were rewritten — verify upstream.
class snake_case :
    """Two-field record used by the ``asdict`` tests."""
    # the second assignment rebinds the same obfuscated name; as written the
    # class exposes a single plain attribute, not two dataclass fields
    snake_case__ = 42
    snake_case__ = 42
class snake_case ( __UpperCAmelCase ):
    """Tests for ``map_nested``, ``zip_dict`` and ``temporary_assignment``.

    NOTE(review): local identifiers in this class were mechanically rewritten;
    many distinct input/expected values now rebind the same name, and the
    assertion arguments (``lowerCamelCase__``) are not bound in these method
    scopes as written — verify against the original test suite.
    """
    def __lowerCAmelCase ( self : List[Any] ):
        # builds one input of every supported container shape, then the
        # expected mapped values, and checks map_nested sequentially and
        # (below) with num_proc=2
        UpperCAmelCase__ = {}
        UpperCAmelCase__ = []
        UpperCAmelCase__ = 1
        UpperCAmelCase__ = [1, 2]
        UpperCAmelCase__ = {'a': 1, 'b': 2}
        UpperCAmelCase__ = {'a': [1, 2], 'b': [3, 4]}
        UpperCAmelCase__ = {'a': {'1': 1}, 'b': 2}
        UpperCAmelCase__ = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
        UpperCAmelCase__ = {}
        UpperCAmelCase__ = []
        UpperCAmelCase__ = 2
        UpperCAmelCase__ = [2, 3]
        UpperCAmelCase__ = {'a': 2, 'b': 3}
        UpperCAmelCase__ = {'a': [2, 3], 'b': [4, 5]}
        UpperCAmelCase__ = {'a': {'1': 2}, 'b': 3}
        UpperCAmelCase__ = {'a': 2, 'b': 3, 'c': 4, 'd': 5}
        self.assertEqual(map_nested(lowerCamelCase__ ,lowerCamelCase__ ) ,lowerCamelCase__ )
        self.assertEqual(map_nested(lowerCamelCase__ ,lowerCamelCase__ ) ,lowerCamelCase__ )
        self.assertEqual(map_nested(lowerCamelCase__ ,lowerCamelCase__ ) ,lowerCamelCase__ )
        self.assertEqual(map_nested(lowerCamelCase__ ,lowerCamelCase__ ) ,lowerCamelCase__ )
        self.assertEqual(map_nested(lowerCamelCase__ ,lowerCamelCase__ ) ,lowerCamelCase__ )
        self.assertEqual(map_nested(lowerCamelCase__ ,lowerCamelCase__ ) ,lowerCamelCase__ )
        self.assertEqual(map_nested(lowerCamelCase__ ,lowerCamelCase__ ) ,lowerCamelCase__ )
        self.assertEqual(map_nested(lowerCamelCase__ ,lowerCamelCase__ ) ,lowerCamelCase__ )
        # same checks through the multiprocessing code path
        UpperCAmelCase__ = 2
        self.assertEqual(map_nested(lowerCamelCase__ ,lowerCamelCase__ ,num_proc=lowerCamelCase__ ) ,lowerCamelCase__ )
        self.assertEqual(map_nested(lowerCamelCase__ ,lowerCamelCase__ ,num_proc=lowerCamelCase__ ) ,lowerCamelCase__ )
        self.assertEqual(map_nested(lowerCamelCase__ ,lowerCamelCase__ ,num_proc=lowerCamelCase__ ) ,lowerCamelCase__ )
        self.assertEqual(map_nested(lowerCamelCase__ ,lowerCamelCase__ ,num_proc=lowerCamelCase__ ) ,lowerCamelCase__ )
        self.assertEqual(map_nested(lowerCamelCase__ ,lowerCamelCase__ ,num_proc=lowerCamelCase__ ) ,lowerCamelCase__ )
        self.assertEqual(map_nested(lowerCamelCase__ ,lowerCamelCase__ ,num_proc=lowerCamelCase__ ) ,lowerCamelCase__ )
        self.assertEqual(map_nested(lowerCamelCase__ ,lowerCamelCase__ ,num_proc=lowerCamelCase__ ) ,lowerCamelCase__ )
        self.assertEqual(map_nested(lowerCamelCase__ ,lowerCamelCase__ ,num_proc=lowerCamelCase__ ) ,lowerCamelCase__ )
        # numpy inputs: map_numpy toggles whether arrays themselves are mapped
        UpperCAmelCase__ = {'a': np.eye(2 ), 'b': np.zeros(3 ), 'c': np.ones(2 )}
        UpperCAmelCase__ = {'a': 2, 'b': 0, 'c': 2}
        UpperCAmelCase__ = {
            'a': np.eye(2 ).astype(lowerCamelCase__ ),
            'b': np.zeros(3 ).astype(lowerCamelCase__ ),
            'c': np.ones(2 ).astype(lowerCamelCase__ ),
        }
        self.assertEqual(map_nested(lowerCamelCase__ ,lowerCamelCase__ ,map_numpy=lowerCamelCase__ ) ,lowerCamelCase__ )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(lowerCamelCase__ ,lowerCamelCase__ ,map_numpy=lowerCamelCase__ ).items()} ,{k: v.tolist() for k, v in expected_map_nested_sna_int.items()} ,)
        self.assertEqual(map_nested(lowerCamelCase__ ,lowerCamelCase__ ,map_numpy=lowerCamelCase__ ,num_proc=lowerCamelCase__ ) ,lowerCamelCase__ )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(lowerCamelCase__ ,lowerCamelCase__ ,map_numpy=lowerCamelCase__ ,num_proc=lowerCamelCase__ ).items()} ,{k: v.tolist() for k, v in expected_map_nested_sna_int.items()} ,)
        with self.assertRaises(lowerCamelCase__ ):  # can't pickle a local lambda
            map_nested(lambda lowerCamelCase__ : x + 1 ,lowerCamelCase__ ,num_proc=lowerCamelCase__ )
    def __lowerCAmelCase ( self : Any ):
        # zip_dict must pair values across dicts sharing the same keys
        UpperCAmelCase__ = {'a': 1, 'b': 2}
        UpperCAmelCase__ = {'a': 3, 'b': 4}
        UpperCAmelCase__ = {'a': 5, 'b': 6}
        UpperCAmelCase__ = sorted([('a', (1, 3, 5)), ('b', (2, 4, 6))] )
        self.assertEqual(sorted(zip_dict(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ) ) ,lowerCamelCase__ )
    def __lowerCAmelCase ( self : Optional[Any] ):
        # temporary_assignment must restore the attribute on context exit
        class snake_case :
            """Minimal carrier object with one mutable attribute."""
            # NOTE(review): instantiated below as ``Foo()`` with attribute
            # ``my_attr`` — names presumably rewritten; verify upstream
            snake_case__ = "bar"
        UpperCAmelCase__ = Foo()
        self.assertEqual(foo.my_attr ,'bar' )
        with temporary_assignment(lowerCamelCase__ ,'my_attr' ,'BAR' ):
            self.assertEqual(foo.my_attr ,'BAR' )
        self.assertEqual(foo.my_attr ,'bar' )
@pytest.mark.parametrize(
    'iterable_length, num_proc, expected_num_proc' , [
        (1, None, 1),
        (1, 1, 1),
        (2, None, 1),
        (2, 1, 1),
        (2, 2, 1),
        (2, 3, 1),
        (3, 2, 1),
        (1_6, 1_6, 1_6),
        (1_6, 1_7, 1_6),
        (1_7, 1_6, 1_6),
    ] , )
def a_(iterable_length, num_proc, expected_num_proc):
    """map_nested must use the sequential path below parallel_min_length and cap
    the pool size at the iterable length otherwise.

    Fixed: the original def repeated one obfuscated name for all three
    parameters (a SyntaxError); parametrize requires the names declared in its
    argument string, so those are restored. The lambda's parameter is also
    re-bound to the ``x`` its body reads.
    """
    with patch('datasets.utils.py_utils._single_map_nested' ) as mock_single_map_nested, patch(
        'datasets.parallel.parallel.Pool' ) as mock_multiprocessing_pool:
        data_struct = {f'''{i}''': i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 1_0, data_struct, num_proc=num_proc, parallel_min_length=1_6)
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            # first positional arg of Pool(...) is the worker count
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class snake_case ( __UpperCAmelCase ):
    """Tests that ``temp_seed`` makes TF / PyTorch / NumPy random generation
    reproducible inside the context and unaffected outside it.

    NOTE(review): identifiers were mechanically rewritten; names read below
    (``model``, ``outa``, ``lowerCamelCase__``) are not bound as written —
    verify against the original test file.
    """
    @require_tf
    def __lowerCAmelCase ( self : Any ):
        import tensorflow as tf
        from tensorflow.keras import layers
        UpperCAmelCase__ = layers.Dense(2 )
        def gen_random_output():
            # a fresh random input through a fixed dense layer
            UpperCAmelCase__ = tf.random.uniform((1, 3) )
            return model(lowerCamelCase__ ).numpy()
        # same seed twice -> identical outputs; outside the context -> different
        with temp_seed(42 ,set_tensorflow=lowerCamelCase__ ):
            UpperCAmelCase__ = gen_random_output()
        with temp_seed(42 ,set_tensorflow=lowerCamelCase__ ):
            UpperCAmelCase__ = gen_random_output()
        UpperCAmelCase__ = gen_random_output()
        np.testing.assert_equal(lowerCamelCase__ ,lowerCamelCase__ )
        self.assertGreater(np.abs(outa - outa ).sum() ,0 )
    @require_torch
    def __lowerCAmelCase ( self : Dict ):
        import torch
        def gen_random_output():
            # fresh random linear layer applied to a fresh random input
            UpperCAmelCase__ = torch.nn.Linear(3 ,2 )
            UpperCAmelCase__ = torch.rand(1 ,3 )
            return model(lowerCamelCase__ ).detach().numpy()
        with temp_seed(42 ,set_pytorch=lowerCamelCase__ ):
            UpperCAmelCase__ = gen_random_output()
        with temp_seed(42 ,set_pytorch=lowerCamelCase__ ):
            UpperCAmelCase__ = gen_random_output()
        UpperCAmelCase__ = gen_random_output()
        np.testing.assert_equal(lowerCamelCase__ ,lowerCamelCase__ )
        self.assertGreater(np.abs(outa - outa ).sum() ,0 )
    def __lowerCAmelCase ( self : str ):
        # plain NumPy path: temp_seed with no framework flags
        def gen_random_output():
            return np.random.rand(1 ,3 )
        with temp_seed(42 ):
            UpperCAmelCase__ = gen_random_output()
        with temp_seed(42 ):
            UpperCAmelCase__ = gen_random_output()
        UpperCAmelCase__ = gen_random_output()
        np.testing.assert_equal(lowerCamelCase__ ,lowerCamelCase__ )
        self.assertGreater(np.abs(outa - outa ).sum() ,0 )
@pytest.mark.parametrize('input_data' , [{}] )
def a_(input_data):
    """NestedDataStructure must expose the wrapped object unchanged via ``.data``.

    Fixed: the result was bound to an obfuscated throwaway name while the
    assertion read ``output_data`` (NameError); the parameter must be named
    ``input_data`` to match the parametrize argument string.
    """
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data
@pytest.mark.parametrize(
    'data, expected_output' , [
        ({}, []),
        ([], []),
        ('foo', ['foo']),
        (['foo', 'bar'], ['foo', 'bar']),
        ([['foo', 'bar']], ['foo', 'bar']),
        ([[['foo'], ['bar']]], ['foo', 'bar']),
        ([[['foo'], 'bar']], ['foo', 'bar']),
        ({'a': 1, 'b': 2}, [1, 2]),
        ({'a': [1, 2], 'b': [3, 4]}, [1, 2, 3, 4]),
        ({'a': [[1, 2]], 'b': [[3, 4]]}, [1, 2, 3, 4]),
        ({'a': [[1, 2]], 'b': [3, 4]}, [1, 2, 3, 4]),
        ({'a': [[[1], [2]]], 'b': [[[3], [4]]]}, [1, 2, 3, 4]),
        ({'a': [[[1], [2]]], 'b': [[3, 4]]}, [1, 2, 3, 4]),
        ({'a': [[[1], [2]]], 'b': [3, 4]}, [1, 2, 3, 4]),
        ({'a': [[[1], [2]]], 'b': [3, [4]]}, [1, 2, 3, 4]),
        ({'a': {'1': 1}, 'b': 2}, [1, 2]),
        ({'a': {'1': [1]}, 'b': 2}, [1, 2]),
        ({'a': {'1': [1]}, 'b': [2]}, [1, 2]),
    ] , )
def a_(data, expected_output):
    """``NestedDataStructure.flatten`` must yield leaves in declaration order.

    Fixed: both parameters shared one obfuscated name (a SyntaxError) and the
    assertion read ``output`` which was never bound; parameter names are
    restored to match the parametrize argument string.
    """
    output = NestedDataStructure(data).flatten()
    assert output == expected_output
def a_():
    """``asdict`` converts dataclasses (possibly nested in containers) to dicts
    and rejects objects that contain non-dataclass leaves.

    Fixed: inputs/expected values were bound to obfuscated throwaway names while
    the assertions read the original names (NameError). NOTE(review): the
    expected exception type for the final check was obfuscated away; ``asdict``
    is assumed to raise TypeError here — verify against the datasets source.
    """
    obj = A(x=1 , y='foobar' )
    expected_output = {'x': 1, 'y': 'foobar'}
    assert asdict(obj) == expected_output
    obj = {'a': {'b': A(x=1_0 , y='foo' )}, 'c': [A(x=2_0 , y='bar' )]}
    expected_output = {'a': {'b': {'x': 1_0, 'y': 'foo'}}, 'c': [{'x': 2_0, 'y': 'bar'}]}
    assert asdict(obj) == expected_output
    with pytest.raises(TypeError):
        asdict([1, A(x=1_0 , y='foo' )] )
def a_(text):
    """Split *text* on whitespace; module-level so worker pools can pickle it.

    Fixed: the body read ``text`` while the parameter carried an obfuscated
    name — and callers invoke it with ``kwargs_iterable=[{'text': ...}]``,
    so the parameter must be named ``text``.
    """
    return text.split()
def a_ ( lowerCamelCase ):
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def a_():
    """iflatmap_unordered must flatten results from all workers, work with both
    multiprocessing and multiprocess pools, and stream items with low latency.

    Fixed: results were bound to obfuscated throwaway names while assertions
    read ``out``, and the pool / appended item arguments referenced unbound
    names; reconstructed from the surrounding ``with ... as pool`` bindings.
    """
    with Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool, _split_text , kwargs_iterable=[{'text': 'hello there'}] * 1_0 ) )
        assert out.count('hello' ) == 1_0
        assert out.count('there' ) == 1_0
        assert len(out) == 2_0
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool, _split_text , kwargs_iterable=[{'text': 'hello there'}] * 1_0 ) )
        assert out.count('hello' ) == 1_0
        assert out.count('there' ) == 1_0
        assert len(out) == 2_0
    # check that we get items as fast as possible
    with Pool(2 ) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{'content': 'a'}, {'content': 'b'}] ):
            assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
            out.append(content)
        assert out.count('a' ) == 2
        assert out.count('b' ) == 2
        assert len(out) == 4
| 98 | """simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class snake_case :
    """A binary-tree node holding ``data`` coins with optional children.

    Fixed: the three field declarations were collapsed onto one rebound
    obfuscated name; field names are reconstructed from how the solver below
    reads nodes (``node.data`` / ``node.left`` / ``node.right``).
    """
    data: int
    left: snake_case | None = None
    right: snake_case | None = None
# Result of the recursive walk: (moves made inside a subtree, that subtree's
# coin excess — an empty subtree reports excess 1, i.e. an offset convention).
# Fixed: the solver below refers to this type as ``CoinsDistribResult``, but
# the original line bound it only to an obfuscated name (NameError at runtime).
CoinsDistribResult = namedtuple('CoinsDistribResult', 'moves excess')
lowerCAmelCase__ : Union[str, Any] = CoinsDistribResult  # keep the old alias alive
def a_(lowerCamelCase):
    """Return the minimum number of moves so that every node of the given tree
    holds exactly one coin (one move shifts one coin along one edge).

    Raises ValueError when the total coin count differs from the node count.
    Fixed: nested helper parameters and the tuple-unpack targets were collapsed
    by obfuscation while their bodies read the original names (NameError);
    reconstructed from the standard distribute-coins recurrence.
    """
    if lowerCamelCase is None:
        return 0

    # Validation
    def count_nodes(node) -> int:
        if node is None:
            return 0
        return count_nodes(node.left ) + count_nodes(node.right ) + 1

    def count_coins(node) -> int:
        if node is None:
            return 0
        return count_coins(node.left ) + count_coins(node.right ) + node.data

    if count_nodes(lowerCamelCase) != count_coins(lowerCamelCase):
        raise ValueError('The nodes number should be same as the number of coins' )

    # Main calculation
    def get_distrib(node) -> CoinsDistribResult:
        if node is None:
            # empty subtree: zero moves; excess reported with the +1 offset
            return CoinsDistribResult(0 , 1 )
        left_moves, left_excess = get_distrib(node.left )
        right_moves, right_excess = get_distrib(node.right )
        # coins that must flow into each child to leave it with one coin apiece
        coins_to_left = 1 - left_excess
        coins_to_right = 1 - right_excess
        moves = (
            left_moves
            + right_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(moves , excess )

    return get_distrib(lowerCamelCase)[0]
if __name__ == "__main__":
    # run any doctest examples embedded in this module's docstrings
    import doctest
    doctest.testmod()
| 98 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
def a_(a, b):
    """Multiply two 2x2 matrices directly (the Strassen recursion base case).

    Raises Exception if either input is not 2x2. Fixed: both parameters shared
    one obfuscated name (a SyntaxError); names reconstructed from the body,
    which reads ``a`` and ``b``.
    """
    if len(a) != 2 or len(a[0] ) != 2 or len(b) != 2 or len(b[0] ) != 2:
        raise Exception('Matrices are not 2x2' )
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix
def a_(matrix_a, matrix_b):
    """Element-wise sum of two equally shaped matrices.

    Fixed: both parameters shared one obfuscated name (a SyntaxError); names
    reconstructed from the body, which reads ``matrix_a`` and ``matrix_b``.
    """
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
        for row in range(len(matrix_a) )
    ]
def a_(matrix_a, matrix_b):
    """Element-wise difference of two equally shaped matrices.

    Fixed: both parameters shared one obfuscated name (a SyntaxError); names
    reconstructed from the body, which reads ``matrix_a`` and ``matrix_b``.
    """
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
        for row in range(len(matrix_a) )
    ]
def a_(a):
    """Split an even-sized square matrix into its four quadrants.

    Returns ``(top_left, top_right, bot_left, bot_right)``. Raises Exception
    for odd dimensions. Fixed: the local length/midpoint bindings were
    collapsed by obfuscation while the comprehensions read the original names.
    """
    if len(a) % 2 != 0 or len(a[0] ) % 2 != 0:
        raise Exception('Odd matrices are not supported!' )
    matrix_length = len(a)
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid , matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid , matrix_length)] for i in range(mid , matrix_length)
    ]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid , matrix_length)]
    return top_left, top_right, bot_left, bot_right
def a_(matrix):
    """Return ``(rows, cols)`` of *matrix*.

    Fixed: the body read ``matrix`` while the parameter carried an obfuscated
    name (NameError on the column count).
    """
    return len(matrix), len(matrix[0] )
def a_(matrix):
    """Print *matrix*, one row per line.

    Fixed: the generator stringified an unbound outer name for every row
    instead of the row variable ``line``, and the parameter name did not match
    the body's ``matrix``.
    """
    print('\n'.join(str(line) for line in matrix) )
def a_(matrix_a, matrix_b):
    """Recursive Strassen multiplication for square power-of-two matrices.

    Fixed: both parameters shared one obfuscated name (a SyntaxError) and the
    quadrant / product bindings were collapsed; reconstructed from the standard
    seven-product Strassen scheme, matching the call shapes in the original
    body. NOTE(review): the helper names called here (``actual_strassen``,
    ``matrix_addition`` …) are the ones the original body used — verify they
    exist under those names in the full module.
    """
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a , matrix_b)
    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)
    # the seven Strassen products
    t_a = actual_strassen(a , matrix_subtraction(f , h) )
    t_a2 = actual_strassen(matrix_addition(a , b) , h )
    t_a3 = actual_strassen(matrix_addition(c , d) , e )
    t_a4 = actual_strassen(d , matrix_subtraction(g , e) )
    t_a5 = actual_strassen(matrix_addition(a , d) , matrix_addition(e , h) )
    t_a6 = actual_strassen(matrix_subtraction(b , d) , matrix_addition(g , h) )
    t_a7 = actual_strassen(matrix_subtraction(a , c) , matrix_addition(e , f) )
    top_left = matrix_addition(matrix_subtraction(matrix_addition(t_a5 , t_a4) , t_a2) , t_a6)
    top_right = matrix_addition(t_a , t_a2)
    bot_left = matrix_addition(t_a3 , t_a4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t_a , t_a5) , t_a3) , t_a7)
    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right) ):
        new_matrix.append(top_left[i] + top_right[i] )
    for i in range(len(bot_right) ):
        new_matrix.append(bot_left[i] + bot_right[i] )
    return new_matrix
def a_(matrixa, matrixb):
    """Multiply two matrices of compatible shapes via Strassen's algorithm.

    Pads both operands with zeros up to the next power-of-two square size, runs
    the recursive multiply, then trims the padding from the result. Raises
    Exception when the inner dimensions do not match.

    Fixed: both parameters shared one obfuscated name (a SyntaxError) and the
    two dimension tuples were collapsed into a single rebound binding, so the
    padding/trimming logic read the wrong operand's shape; ``dimensiona`` and
    ``dimensionb`` are restored as distinct values.
    """
    if matrix_dimensions(matrixa)[1] != matrix_dimensions(matrixb)[0]:
        msg = (
            'Unable to multiply these matrices, please check the dimensions.\n'
            f'''Matrix A: {matrixa}\n'''
            f'''Matrix B: {matrixb}'''
        )
        raise Exception(msg)
    dimensiona = matrix_dimensions(matrixa)
    dimensionb = matrix_dimensions(matrixb)
    if dimensiona[0] == dimensiona[1] and dimensionb[0] == dimensionb[1]:
        return [matrixa, matrixb]
    maximum = max(*dimensiona , *dimensionb)
    maxim = int(math.pow(2 , math.ceil(math.loga(maximum) ) ) )
    new_matrixa = matrixa
    new_matrixb = matrixb
    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0 , maxim):
        if i < dimensiona[0]:
            for _ in range(dimensiona[1] , maxim):
                new_matrixa[i].append(0 )
        else:
            new_matrixa.append([0] * maxim )
        if i < dimensionb[0]:
            for _ in range(dimensionb[1] , maxim):
                new_matrixb[i].append(0 )
        else:
            new_matrixb.append([0] * maxim )
    final_matrix = actual_strassen(new_matrixa , new_matrixb)
    # Removing the additional zeros: result is dimensiona[0] x dimensionb[1]
    for i in range(0 , maxim):
        if i < dimensiona[0]:
            for _ in range(dimensionb[1] , maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    # NOTE(review): both demo matrices are bound to the same (obfuscated) name,
    # so the first 10x4 matrix is rebound to the 4x4 one before use, and the
    # call below reads ``matrixa``/``strassen`` which are not bound as written —
    # the originals were presumably two distinct bindings; verify upstream.
    lowerCAmelCase__ : List[Any] = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    lowerCAmelCase__ : List[str] = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrixa, matrixa))
| 98 | """simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class snake_case ( __UpperCAmelCase ):
    """PNDMScheduler test-suite: config round-trips, prk/plms stepping,
    timestep handling and end-to-end numerical regression values.

    NOTE(review): local identifiers were mechanically rewritten; many names
    read below (``config``, ``kwargs``, ``scheduler``, ``sample``, ``output``
    …) are no longer bound by the rewritten assignments — verify against the
    original diffusers test file before trusting runtime behaviour.
    """
    # scheduler classes under test and default forward kwargs
    snake_case__ = (PNDMScheduler,)
    snake_case__ = (("num_inference_steps", 50),)
    def __lowerCAmelCase ( self : List[str] ,**lowerCamelCase__ : str ):
        # base scheduler config; keyword overrides are merged in before return
        UpperCAmelCase__ = {
            'num_train_timesteps': 1_000,
            'beta_start': 0.0_0_0_1,
            'beta_end': 0.0_2,
            'beta_schedule': 'linear',
        }
        config.update(**lowerCamelCase__ )
        return config
    def __lowerCAmelCase ( self : str ,lowerCamelCase__ : Optional[Any]=0 ,**lowerCamelCase__ : List[str] ):
        # save a configured scheduler, reload it, and require step_prk /
        # step_plms outputs to match between the two instances
        UpperCAmelCase__ = dict(self.forward_default_kwargs )
        UpperCAmelCase__ = kwargs.pop('num_inference_steps' ,lowerCamelCase__ )
        UpperCAmelCase__ = self.dummy_sample
        UpperCAmelCase__ = 0.1 * sample
        UpperCAmelCase__ = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
        for scheduler_class in self.scheduler_classes:
            UpperCAmelCase__ = self.get_scheduler_config(**lowerCamelCase__ )
            UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
            scheduler.set_timesteps(lowerCamelCase__ )
            # copy over dummy past residuals
            UpperCAmelCase__ = dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(lowerCamelCase__ )
                UpperCAmelCase__ = scheduler_class.from_pretrained(lowerCamelCase__ )
                new_scheduler.set_timesteps(lowerCamelCase__ )
                # copy over dummy past residuals
                UpperCAmelCase__ = dummy_past_residuals[:]
            UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
            UpperCAmelCase__ = new_scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
            UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
            UpperCAmelCase__ = new_scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def __lowerCAmelCase ( self : Tuple ):
        # intentionally disabled in the PNDM suite
        pass
    def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : List[str]=0 ,**lowerCamelCase__ : Tuple ):
        # like the round-trip check above, but residuals are set after
        # set_timesteps on the reloaded instance
        UpperCAmelCase__ = dict(self.forward_default_kwargs )
        UpperCAmelCase__ = kwargs.pop('num_inference_steps' ,lowerCamelCase__ )
        UpperCAmelCase__ = self.dummy_sample
        UpperCAmelCase__ = 0.1 * sample
        UpperCAmelCase__ = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
        for scheduler_class in self.scheduler_classes:
            UpperCAmelCase__ = self.get_scheduler_config()
            UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
            scheduler.set_timesteps(lowerCamelCase__ )
            # copy over dummy past residuals (must be after setting timesteps)
            UpperCAmelCase__ = dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(lowerCamelCase__ )
                UpperCAmelCase__ = scheduler_class.from_pretrained(lowerCamelCase__ )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(lowerCamelCase__ )
                # copy over dummy past residual (must be after setting timesteps)
                UpperCAmelCase__ = dummy_past_residuals[:]
            UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
            UpperCAmelCase__ = new_scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
            UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
            UpperCAmelCase__ = new_scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def __lowerCAmelCase ( self : List[Any] ,**lowerCamelCase__ : int ):
        # run a complete prk + plms denoising loop over 10 inference steps
        UpperCAmelCase__ = self.scheduler_classes[0]
        UpperCAmelCase__ = self.get_scheduler_config(**lowerCamelCase__ )
        UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
        UpperCAmelCase__ = 10
        UpperCAmelCase__ = self.dummy_model()
        UpperCAmelCase__ = self.dummy_sample_deter
        scheduler.set_timesteps(lowerCamelCase__ )
        for i, t in enumerate(scheduler.prk_timesteps ):
            UpperCAmelCase__ = model(lowerCamelCase__ ,lowerCamelCase__ )
            UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ).prev_sample
        for i, t in enumerate(scheduler.plms_timesteps ):
            UpperCAmelCase__ = model(lowerCamelCase__ ,lowerCamelCase__ )
            UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ).prev_sample
        return sample
    def __lowerCAmelCase ( self : int ):
        # step_prk / step_plms output shapes must match the input sample
        UpperCAmelCase__ = dict(self.forward_default_kwargs )
        UpperCAmelCase__ = kwargs.pop('num_inference_steps' ,lowerCamelCase__ )
        for scheduler_class in self.scheduler_classes:
            UpperCAmelCase__ = self.get_scheduler_config()
            UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
            UpperCAmelCase__ = self.dummy_sample
            UpperCAmelCase__ = 0.1 * sample
            if num_inference_steps is not None and hasattr(lowerCamelCase__ ,'set_timesteps' ):
                scheduler.set_timesteps(lowerCamelCase__ )
            elif num_inference_steps is not None and not hasattr(lowerCamelCase__ ,'set_timesteps' ):
                UpperCAmelCase__ = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            UpperCAmelCase__ = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
            UpperCAmelCase__ = dummy_past_residuals[:]
            UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,0 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
            UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,1 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
            self.assertEqual(output_a.shape ,sample.shape )
            self.assertEqual(output_a.shape ,output_a.shape )
            UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,0 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
            UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,1 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
            self.assertEqual(output_a.shape ,sample.shape )
            self.assertEqual(output_a.shape ,output_a.shape )
    def __lowerCAmelCase ( self : List[Any] ):
        # config sweep: training timestep counts
        for timesteps in [100, 1_000]:
            self.check_over_configs(num_train_timesteps=lowerCamelCase__ )
    def __lowerCAmelCase ( self : Optional[int] ):
        # config sweep: steps_offset, plus the exact timestep schedule for offset 1
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=lowerCamelCase__ )
        UpperCAmelCase__ = self.scheduler_classes[0]
        UpperCAmelCase__ = self.get_scheduler_config(steps_offset=1 )
        UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
        scheduler.set_timesteps(10 )
        assert torch.equal(
            scheduler.timesteps ,torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) ,)
    def __lowerCAmelCase ( self : Dict ):
        # config sweep: beta range endpoints
        for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1] ,[0.0_0_2, 0.0_2] ):
            self.check_over_configs(beta_start=lowerCamelCase__ ,beta_end=lowerCamelCase__ )
    def __lowerCAmelCase ( self : Union[str, Any] ):
        # config sweep: beta schedules
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=lowerCamelCase__ )
    def __lowerCAmelCase ( self : List[Any] ):
        # config sweep: prediction types
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=lowerCamelCase__ )
    def __lowerCAmelCase ( self : Optional[Any] ):
        # forward sweep: individual timesteps
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=lowerCamelCase__ )
    def __lowerCAmelCase ( self : List[Any] ):
        # forward sweep: timestep / inference-step pairs
        for t, num_inference_steps in zip([1, 5, 10] ,[10, 50, 100] ):
            self.check_over_forward(num_inference_steps=lowerCamelCase__ )
    def __lowerCAmelCase ( self : int ):
        # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
        UpperCAmelCase__ = 27
        for scheduler_class in self.scheduler_classes:
            UpperCAmelCase__ = self.dummy_sample
            UpperCAmelCase__ = 0.1 * sample
            UpperCAmelCase__ = self.get_scheduler_config()
            UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
            scheduler.set_timesteps(lowerCamelCase__ )
            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2] ):
                UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ).prev_sample
    def __lowerCAmelCase ( self : int ):
        # step_plms before set_timesteps must raise
        with self.assertRaises(lowerCamelCase__ ):
            UpperCAmelCase__ = self.scheduler_classes[0]
            UpperCAmelCase__ = self.get_scheduler_config()
            UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
            scheduler.step_plms(self.dummy_sample ,1 ,self.dummy_sample ).prev_sample
    def __lowerCAmelCase ( self : Tuple ):
        # numerical regression: default full loop
        UpperCAmelCase__ = self.full_loop()
        UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase__ ) )
        UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase__ ) )
        assert abs(result_sum.item() - 1_9_8.1_3_1_8 ) < 1e-2
        assert abs(result_mean.item() - 0.2_5_8_0 ) < 1e-3
    def __lowerCAmelCase ( self : Tuple ):
        # numerical regression: v_prediction full loop
        UpperCAmelCase__ = self.full_loop(prediction_type='v_prediction' )
        UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase__ ) )
        UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase__ ) )
        assert abs(result_sum.item() - 6_7.3_9_8_6 ) < 1e-2
        assert abs(result_mean.item() - 0.0_8_7_8 ) < 1e-3
    def __lowerCAmelCase ( self : Union[str, Any] ):
        # We specify different beta, so that the first alpha is 0.99
        UpperCAmelCase__ = self.full_loop(set_alpha_to_one=lowerCamelCase__ ,beta_start=0.0_1 )
        UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase__ ) )
        UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase__ ) )
        assert abs(result_sum.item() - 2_3_0.0_3_9_9 ) < 1e-2
        assert abs(result_mean.item() - 0.2_9_9_5 ) < 1e-3
    def __lowerCAmelCase ( self : Tuple ):
        # We specify different beta, so that the first alpha is 0.99
        UpperCAmelCase__ = self.full_loop(set_alpha_to_one=lowerCamelCase__ ,beta_start=0.0_1 )
        UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase__ ) )
        UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase__ ) )
        assert abs(result_sum.item() - 1_8_6.9_4_8_2 ) < 1e-2
        assert abs(result_mean.item() - 0.2_4_3_4 ) < 1e-3
| 98 | 1 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class snake_case ( unittest.TestCase ):
    """Pipeline tests for ``text2text-generation`` over seq2seq LM mappings.

    NOTE(review): identifiers were mechanically rewritten — the first method's
    three parameters share one name (a SyntaxError as written), and several
    assigned values (``generator``, ``outputs``) are later read under their
    original names; verify against the upstream transformers test file.
    """
    # model mappings the pipeline-test harness draws architectures from
    snake_case__ = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    snake_case__ = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Any ,lowerCamelCase__ : Any ):
        # build the pipeline under test plus example inputs for the harness
        UpperCAmelCase__ = TextaTextGenerationPipeline(model=lowerCamelCase__ ,tokenizer=lowerCamelCase__ )
        return generator, ["Something to write", "Something else"]
    def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Optional[Any] ):
        # single and batched calls must yield generated_text entries; invalid
        # input types must raise
        UpperCAmelCase__ = generator('Something there' )
        self.assertEqual(lowerCamelCase__ ,[{'generated_text': ANY(lowerCamelCase__ )}] )
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]['generated_text'].startswith('Something there' ) )
        UpperCAmelCase__ = generator(['This is great !', 'Something else'] ,num_return_sequences=2 ,do_sample=lowerCamelCase__ )
        self.assertEqual(
            lowerCamelCase__ ,[
                [{'generated_text': ANY(lowerCamelCase__ )}, {'generated_text': ANY(lowerCamelCase__ )}],
                [{'generated_text': ANY(lowerCamelCase__ )}, {'generated_text': ANY(lowerCamelCase__ )}],
            ] ,)
        UpperCAmelCase__ = generator(
            ['This is great !', 'Something else'] ,num_return_sequences=2 ,batch_size=2 ,do_sample=lowerCamelCase__ )
        self.assertEqual(
            lowerCamelCase__ ,[
                [{'generated_text': ANY(lowerCamelCase__ )}, {'generated_text': ANY(lowerCamelCase__ )}],
                [{'generated_text': ANY(lowerCamelCase__ )}, {'generated_text': ANY(lowerCamelCase__ )}],
            ] ,)
        with self.assertRaises(lowerCamelCase__ ):
            generator(4 )
    @require_torch
    def __lowerCAmelCase ( self : Optional[Any] ):
        # PyTorch backend: beam search counts, tensor returns, and batched
        # tensor returns with a pad token
        UpperCAmelCase__ = pipeline('text2text-generation' ,model='patrickvonplaten/t5-tiny-random' ,framework='pt' )
        # do_sample=False necessary for reproducibility
        UpperCAmelCase__ = generator('Something there' ,do_sample=lowerCamelCase__ )
        self.assertEqual(lowerCamelCase__ ,[{'generated_text': ''}] )
        UpperCAmelCase__ = 3
        UpperCAmelCase__ = generator(
            'Something there' ,num_return_sequences=lowerCamelCase__ ,num_beams=lowerCamelCase__ ,)
        UpperCAmelCase__ = [
            {'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide Beide'},
            {'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide'},
            {'generated_text': ''},
        ]
        self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ )
        UpperCAmelCase__ = generator('This is a test' ,do_sample=lowerCamelCase__ ,num_return_sequences=2 ,return_tensors=lowerCamelCase__ )
        self.assertEqual(
            lowerCamelCase__ ,[
                {'generated_token_ids': ANY(torch.Tensor )},
                {'generated_token_ids': ANY(torch.Tensor )},
            ] ,)
        UpperCAmelCase__ = generator.model.config.eos_token_id
        UpperCAmelCase__ = '<pad>'
        UpperCAmelCase__ = generator(
            ['This is a test', 'This is a second test'] ,do_sample=lowerCamelCase__ ,num_return_sequences=2 ,batch_size=2 ,return_tensors=lowerCamelCase__ ,)
        self.assertEqual(
            lowerCamelCase__ ,[
                [
                    {'generated_token_ids': ANY(torch.Tensor )},
                    {'generated_token_ids': ANY(torch.Tensor )},
                ],
                [
                    {'generated_token_ids': ANY(torch.Tensor )},
                    {'generated_token_ids': ANY(torch.Tensor )},
                ],
            ] ,)
    @require_tf
    def __lowerCAmelCase ( self : Any ):
        # TensorFlow backend smoke test
        UpperCAmelCase__ = pipeline('text2text-generation' ,model='patrickvonplaten/t5-tiny-random' ,framework='tf' )
        # do_sample=False necessary for reproducibility
        UpperCAmelCase__ = generator('Something there' ,do_sample=lowerCamelCase__ )
        self.assertEqual(lowerCamelCase__ ,[{'generated_text': ''}] )
| 98 | """simple docstring"""
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
lowerCAmelCase__ : Optional[Any] = logging.getLogger()
def a_ ( ):
    """Parse the ``-f`` command-line flag and return its value.

    Fix: the original assigned the parser and the parsed namespace to a dead local
    (``UpperCAmelCase__``) and then read the undefined names ``parser``/``args``,
    raising ``NameError`` on every call.

    Returns:
        The string passed to ``-f`` (or ``None`` when the flag is absent).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-f' )
    args = parser.parse_args()
    return args.f
class snake_case ( __UpperCAmelCase ):
    """Slow end-to-end tests for the DeeBERT research example: train/eval runs of
    ``run_glue_deebert.py`` on the MRPC fixture data must score >= 0.666 on every metric.

    NOTE(review): the base class ``__UpperCAmelCase`` and locals read under ``n_gpu``,
    ``args`` and ``result`` (while assignments target ``UpperCAmelCase__``) look like
    mechanical-rename artifacts; confirm against the upstream test (presumably based on
    ``TestCasePlus``) before relying on runtime behavior.
    """
    def __lowerCAmelCase ( self : int ):
        # Mirror the script's log output on stdout so test failures are debuggable.
        UpperCAmelCase__ = logging.StreamHandler(sys.stdout )
        logger.addHandler(lowerCamelCase__ )
    def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : Union[str, Any] ):
        # Run the example script in-process (single-GPU only) by patching sys.argv,
        # then require every reported metric to clear the 0.666 bar.
        UpperCAmelCase__ = get_gpu_count()
        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0 ,'run_glue_deebert.py' )
            with patch.object(lowerCamelCase__ ,'argv' ,lowerCamelCase__ ):
                UpperCAmelCase__ = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(lowerCamelCase__ ,0.6_6_6 )
    @slow
    @require_torch_non_multi_gpu
    def __lowerCAmelCase ( self : List[str] ):
        # Stage 1: two-stage training on MRPC, evaluating after the first stage.
        UpperCAmelCase__ = '\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n '.split()
        self.run_and_check(lowerCamelCase__ )
        # Stage 2: evaluate each highway (early-exit) layer of the trained model.
        UpperCAmelCase__ = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
        self.run_and_check(lowerCamelCase__ )
        # Stage 3: evaluate with an entropy-based early-exit threshold.
        UpperCAmelCase__ = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
        self.run_and_check(lowerCamelCase__ )
| 98 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
lowerCAmelCase__ : Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCAmelCase__ : List[str] = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... 
prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def a_ ( height , width , scale_factor=8 ):
    """Compute latent-grid dimensions for a requested pixel size.

    Divides each dimension by ``scale_factor**2`` rounding up, then multiplies the
    result by ``scale_factor`` — i.e. returns ``ceil(dim / scale_factor**2) * scale_factor``
    for both dimensions.

    Fix: the original declared two parameters with the same name (a ``SyntaxError`` in
    Python) and assigned the intermediate results to a dead local while returning the
    undefined names ``new_height``/``new_width``. Positional call sites are unaffected.

    Args:
        height: requested height in pixels.
        width: requested width in pixels.
        scale_factor: autoencoder downscale factor (default 8).

    Returns:
        Tuple ``(new_height, new_width)``.
    """
    new_height = height // scale_factor**2
    # Round up when the height is not an exact multiple of scale_factor**2.
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class snake_case ( __UpperCAmelCase ):
    """Kandinsky 2.2 ControlNet decoder pipeline: denoises latents with a UNet conditioned
    on image embeddings plus a control ``hint``, then decodes them with the MoVQ VQ model.

    NOTE(review): throughout this class, locals are assigned to ``UpperCAmelCase__`` but
    read back under their original names, and several signatures repeat the parameter name
    ``lowerCamelCase__`` — this looks like a mechanical-rename artifact; confirm against the
    upstream diffusers source before relying on runtime behavior.
    """
    def __init__( self : Tuple ,lowerCamelCase__ : UNetaDConditionModel ,lowerCamelCase__ : DDPMScheduler ,lowerCamelCase__ : VQModel ,):
        super().__init__()
        # Register sub-models so DiffusionPipeline can (de)serialize and move them together.
        self.register_modules(
            unet=lowerCamelCase__ ,scheduler=lowerCamelCase__ ,movq=lowerCamelCase__ ,)
        # Spatial downscale factor of the MoVQ autoencoder: 2 ** (number of down stages).
        UpperCAmelCase__ = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : Any ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : Dict ,lowerCamelCase__ : str ,lowerCamelCase__ : int ):
        # Draw fresh noise when no latents are given; otherwise validate and reuse the
        # caller's, moved to the target device.
        if latents is None:
            UpperCAmelCase__ = randn_tensor(lowerCamelCase__ ,generator=lowerCamelCase__ ,device=lowerCamelCase__ ,dtype=lowerCamelCase__ )
        else:
            if latents.shape != shape:
                raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
            UpperCAmelCase__ = latents.to(lowerCamelCase__ )
        # Scale by the scheduler's initial noise sigma so step() sees correctly scaled input.
        UpperCAmelCase__ = latents * scheduler.init_noise_sigma
        return latents
    def __lowerCAmelCase ( self : List[str] ,lowerCamelCase__ : List[str]=0 ):
        # Offload sub-models to CPU, keeping them on GPU only while executing.
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`' )
        UpperCAmelCase__ = torch.device(f'''cuda:{gpu_id}''' )
        UpperCAmelCase__ = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(lowerCamelCase__ ,lowerCamelCase__ )
    def __lowerCAmelCase ( self : str ,lowerCamelCase__ : Tuple=0 ):
        # Whole-model CPU offload: moves each model to GPU on use via accelerate hooks.
        if is_accelerate_available() and is_accelerate_version('>=' ,'0.17.0.dev0' ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
        UpperCAmelCase__ = torch.device(f'''cuda:{gpu_id}''' )
        if self.device.type != "cpu":
            self.to('cpu' ,silence_dtype_warnings=lowerCamelCase__ )
            torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
        UpperCAmelCase__ = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            UpperCAmelCase__ , UpperCAmelCase__ = cpu_offload_with_hook(lowerCamelCase__ ,lowerCamelCase__ ,prev_module_hook=lowerCamelCase__ )
        # We'll offload the last model manually.
        UpperCAmelCase__ = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def __lowerCAmelCase ( self : List[str] ):
        # Device the UNet actually runs on, accounting for accelerate offload hooks.
        if not hasattr(self.unet ,'_hf_hook' ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(lowerCamelCase__ ,'_hf_hook' )
                and hasattr(module._hf_hook ,'execution_device' )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
    @torch.no_grad()
    @replace_example_docstring(lowerCamelCase__ )
    def __call__( self : str ,lowerCamelCase__ : Union[torch.FloatTensor, List[torch.FloatTensor]] ,lowerCamelCase__ : Union[torch.FloatTensor, List[torch.FloatTensor]] ,lowerCamelCase__ : torch.FloatTensor ,lowerCamelCase__ : int = 512 ,lowerCamelCase__ : int = 512 ,lowerCamelCase__ : int = 100 ,lowerCamelCase__ : float = 4.0 ,lowerCamelCase__ : int = 1 ,lowerCamelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None ,lowerCamelCase__ : Optional[torch.FloatTensor] = None ,lowerCamelCase__ : Optional[str] = "pil" ,lowerCamelCase__ : bool = True ,):
        # Full denoising loop: prepare embeds/hint, iterate scheduler steps, decode latents.
        UpperCAmelCase__ = self._execution_device
        UpperCAmelCase__ = guidance_scale > 1.0
        # Concatenate list inputs along the batch dimension.
        if isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
            UpperCAmelCase__ = torch.cat(lowerCamelCase__ ,dim=0 )
        if isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
            UpperCAmelCase__ = torch.cat(lowerCamelCase__ ,dim=0 )
        if isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
            UpperCAmelCase__ = torch.cat(lowerCamelCase__ ,dim=0 )
        UpperCAmelCase__ = image_embeds.shape[0] * num_images_per_prompt
        if do_classifier_free_guidance:
            # Duplicate per requested image, then stack negative+positive for CFG.
            UpperCAmelCase__ = image_embeds.repeat_interleave(lowerCamelCase__ ,dim=0 )
            UpperCAmelCase__ = negative_image_embeds.repeat_interleave(lowerCamelCase__ ,dim=0 )
            UpperCAmelCase__ = hint.repeat_interleave(lowerCamelCase__ ,dim=0 )
            UpperCAmelCase__ = torch.cat([negative_image_embeds, image_embeds] ,dim=0 ).to(dtype=self.unet.dtype ,device=lowerCamelCase__ )
            UpperCAmelCase__ = torch.cat([hint, hint] ,dim=0 ).to(dtype=self.unet.dtype ,device=lowerCamelCase__ )
        self.scheduler.set_timesteps(lowerCamelCase__ ,device=lowerCamelCase__ )
        UpperCAmelCase__ = self.scheduler.timesteps
        UpperCAmelCase__ = self.movq.config.latent_channels
        UpperCAmelCase__ , UpperCAmelCase__ = downscale_height_and_width(lowerCamelCase__ ,lowerCamelCase__ ,self.movq_scale_factor )
        # create initial latent
        UpperCAmelCase__ = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) ,image_embeds.dtype ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,self.scheduler ,)
        for i, t in enumerate(self.progress_bar(lowerCamelCase__ ) ):
            # expand the latents if we are doing classifier free guidance
            UpperCAmelCase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            UpperCAmelCase__ = {'image_embeds': image_embeds, 'hint': hint}
            UpperCAmelCase__ = self.unet(
                sample=lowerCamelCase__ ,timestep=lowerCamelCase__ ,encoder_hidden_states=lowerCamelCase__ ,added_cond_kwargs=lowerCamelCase__ ,return_dict=lowerCamelCase__ ,)[0]
            if do_classifier_free_guidance:
                # Split noise/variance halves and blend unconditional+text noise by guidance_scale.
                UpperCAmelCase__ , UpperCAmelCase__ = noise_pred.split(latents.shape[1] ,dim=1 )
                UpperCAmelCase__ , UpperCAmelCase__ = noise_pred.chunk(2 )
                UpperCAmelCase__ , UpperCAmelCase__ = variance_pred.chunk(2 )
                UpperCAmelCase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                UpperCAmelCase__ = torch.cat([noise_pred, variance_pred_text] ,dim=1 )
            if not (
                hasattr(self.scheduler.config ,'variance_type' )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                UpperCAmelCase__ , UpperCAmelCase__ = noise_pred.split(latents.shape[1] ,dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            UpperCAmelCase__ = self.scheduler.step(
                lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,generator=lowerCamelCase__ ,)[0]
        # post-processing
        UpperCAmelCase__ = self.movq.decode(lowerCamelCase__ ,force_not_quantize=lowerCamelCase__ )['sample']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
        if output_type in ["np", "pil"]:
            # Map [-1, 1] decoder output into [0, 1] and move to NHWC numpy.
            UpperCAmelCase__ = image * 0.5 + 0.5
            UpperCAmelCase__ = image.clamp(0 ,1 )
            UpperCAmelCase__ = image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
        if output_type == "pil":
            UpperCAmelCase__ = self.numpy_to_pil(lowerCamelCase__ )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=lowerCamelCase__ )
| 98 | """simple docstring"""
import argparse
lowerCAmelCase__ : List[str] = 'docs/source/_static/js/custom.js'
def a_ ( lowerCamelCase , lowerCamelCase_a='docs/source/_static/js/custom.js' ):
    """Update the docs' ``custom.js`` in place for a new release.

    Rewrites the ``const stableVersion = ...`` line and appends the new version to the
    ``const versionMapping = { ... }`` table.

    Fixes: the original read the undefined name ``version`` (the parameter is the
    version string), wrote the new stable-version line to a dead local instead of
    ``lines[index]``, and opened the version argument as if it were the file path.
    The file path is now an optional second parameter (defaulting to the in-repo
    location, i.e. the value of the module-level constant), which keeps existing
    single-argument call sites working.

    Args:
        lowerCamelCase: release version string, without the leading "v".
        lowerCamelCase_a: path of the custom.js file to rewrite.
    """
    version = lowerCamelCase
    with open(lowerCamelCase_a , encoding='utf-8' , newline='\n' ) as f:
        lines = f.readlines()
    index = 0
    # First let's put the right version
    while not lines[index].startswith('const stableVersion =' ):
        index += 1
    lines[index] = f'''const stableVersion = "v{version}"\n'''
    # Then update the dictionary
    while not lines[index].startswith('const versionMapping = {' ):
        index += 1
    # We go until the end
    while not lines[index].startswith('}' ):
        index += 1
    # We add the new version at the end
    lines[index - 1] += f''' "v{version}": "v{version}",\n'''
    with open(lowerCamelCase_a , 'w' , encoding='utf-8' , newline='\n' ) as f:
        f.writelines(lines )


# The __main__ block below calls this script under its descriptive name.
update_custom_js = a_
if __name__ == "__main__":
    # CLI entry point: `python <script> --version X.Y.Z` rewrites the docs' custom.js.
    # Fix: the original assigned the parser and namespace to dead locals
    # (`lowerCAmelCase__`) and then read the undefined names `parser`/`args`; it also
    # called `update_custom_js`, which is not defined in this file — the updater is `a_`.
    parser = argparse.ArgumentParser()
    parser.add_argument('--version', help='Release version.')
    args = parser.parse_args()
    a_(args.version)
| 98 | 1 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=__UpperCAmelCase )
class snake_case ( __UpperCAmelCase ):
    """Task template describing a summarization dataset: one input text column mapped to
    "text" and one target column mapped to "summary".

    NOTE(review): ``frozen=__UpperCAmelCase``, the base class ``__UpperCAmelCase`` and the
    repeated field name ``snake_case__`` look like mechanical-rename artifacts (upstream
    this is ``@dataclass(frozen=True)`` on a ``TaskTemplate`` subclass with distinct field
    names); confirm before relying on runtime behavior.
    """
    # Task identifier; kept in asdict() output even when left at its default.
    snake_case__ = field(default="summarization" , metadata={"include_in_asdict_even_if_is_default": True} )
    # Expected input and label feature schemas.
    snake_case__ = Features({"text": Value("string" )} )
    snake_case__ = Features({"summary": Value("string" )} )
    # Default column names in the raw dataset.
    snake_case__ = "text"
    snake_case__ = "summary"
    @property
    def __lowerCAmelCase ( self : List[Any] ):
        # Maps dataset column names to the canonical task column names.
        # NOTE(review): reads ``self.text_column``/``self.summary_column``, which are not
        # defined under those names above — presumably the last two fields; confirm.
        return {self.text_column: "text", self.summary_column: "summary"}
| 98 | """simple docstring"""
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def a_ ( lowerCamelCase ):
    """Return the squared Euclidean norm of *lowerCamelCase*, i.e. its dot product with itself."""
    vector = np.asarray(lowerCamelCase )
    return vector.dot(vector )
class snake_case :
    """Support vector classifier solved via Wolfe's dual with ``scipy.optimize.minimize``.

    Fixes to the original block, which could not even be parsed or run:
    - every method signature repeated the parameter name ``lowerCamelCase__``
      (a ``SyntaxError`` in Python);
    - four methods all shared the name ``__lowerCAmelCase``, so each definition
      shadowed the previous one — the kernels, ``fit`` and ``predict`` are restored
      under distinct names;
    - ``self.__linear``/``self.__rbf`` and the free name ``norm_squared`` were
      unresolved; the squared norm is now computed inline with ``np.dot``.
    Constructor keywords are restored to the attribute names the body already uses
    (``regularization``, ``kernel``, ``gamma``).
    """

    def __init__( self ,*, regularization: float = np.inf ,kernel: str = "linear" ,gamma: float = 0.0 ):
        """Select the kernel and store hyper-parameters.

        Raises:
            ValueError: for an unknown kernel name, or an invalid/missing gamma
                with the "rbf" kernel.
        """
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError('rbf kernel requires gamma' )
            if not isinstance(self.gamma ,(float, int) ):
                raise ValueError('gamma must be float or int' )
            if not self.gamma > 0:
                raise ValueError('gamma must be > 0' )
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklear: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            message = f'''Unknown kernel: {kernel}'''
            raise ValueError(message )

    def __linear( self ,vectora: ndarray ,vectorb: ndarray ) -> float:
        """Linear kernel: plain dot product of the two vectors."""
        return np.dot(vectora ,vectorb )

    def __rbf( self ,vectora: ndarray ,vectorb: ndarray ) -> float:
        """RBF kernel: exp(-gamma * ||a - b||^2)."""
        difference = vectora - vectorb
        return np.exp(-(self.gamma * np.dot(difference ,difference )) )

    def fit( self ,observations: list ,classes: ndarray ) -> None:
        """Fit the classifier on observations with labels in {-1, +1}."""
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        #   constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        #   constraint: self.C >= ln >= 0
        #           and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations
        (n,) = np.shape(classes )

        def to_minimize(candidate: ndarray ) -> float:
            # Negated dual objective: 1/2 * sum_ij(l_i l_j y_i y_j K(x_i, x_j)) - sum_i(l_i)
            total = 0
            (size,) = np.shape(candidate )
            for i in range(size ):
                for j in range(size ):
                    total += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i] ,observations[j] )
                    )
            return 1 / 2 * total - sum(candidate )

        # Equality constraint sum_n(l_n * y_n) = 0 and box constraint 0 <= l_n <= C.
        ly_contraint = LinearConstraint(classes ,0 ,0 )
        l_bounds = Bounds(0 ,self.regularization )
        l_star = minimize(
            to_minimize ,np.ones(n ) ,bounds=l_bounds ,constraints=[ly_contraint] ).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        offset_sum = 0
        for i in range(n ):
            for j in range(n ):
                offset_sum += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i] ,observations[j] )
        self.offset = offset_sum / n

    def predict( self ,observation: ndarray ) -> int:
        """Classify a single observation; returns +1 or -1."""
        score = sum(
            self.optimum[i]
            * self.classes[i]
            * self.kernel(self.observations[i] ,observation )
            for i in range(len(self.classes ) ) )
        return 1 if score + self.offset >= 0 else -1
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 98 | 1 |
"""simple docstring"""
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class snake_case :
    """Helper that builds a tiny TransfoXL config, random inputs, and the
    per-architecture checks used by the TF test suite below.

    NOTE(review): locals/attributes are assigned to ``UpperCAmelCase__`` but read back
    via ``self.<original_name>`` (e.g. ``self.seq_length``), and some methods repeat the
    parameter name ``lowerCamelCase__`` — this looks like a mechanical-rename artifact;
    confirm against the upstream transformers test before relying on runtime behavior.
    """
    def __init__( self : List[Any] ,lowerCamelCase__ : List[str] ,):
        # Small, fixed hyper-parameters so model construction and forward passes stay fast.
        UpperCAmelCase__ = parent
        UpperCAmelCase__ = 13
        UpperCAmelCase__ = 7
        UpperCAmelCase__ = 30
        UpperCAmelCase__ = self.seq_length + self.mem_len
        UpperCAmelCase__ = 15
        UpperCAmelCase__ = True
        UpperCAmelCase__ = True
        UpperCAmelCase__ = 99
        UpperCAmelCase__ = [10, 50, 80]
        UpperCAmelCase__ = 32
        UpperCAmelCase__ = 32
        UpperCAmelCase__ = 4
        UpperCAmelCase__ = 8
        UpperCAmelCase__ = 128
        UpperCAmelCase__ = 2
        UpperCAmelCase__ = 2
        UpperCAmelCase__ = None
        UpperCAmelCase__ = 1
        UpperCAmelCase__ = 0
        UpperCAmelCase__ = 3
        UpperCAmelCase__ = self.vocab_size - 1
        UpperCAmelCase__ = 0.0_1
    def __lowerCAmelCase ( self : Tuple ):
        # Build random input id tensors (and optional labels) plus a matching config.
        UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        UpperCAmelCase__ = None
        if self.use_labels:
            UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        UpperCAmelCase__ = TransfoXLConfig(
            vocab_size=self.vocab_size ,mem_len=self.mem_len ,clamp_len=self.clamp_len ,cutoffs=self.cutoffs ,d_model=self.hidden_size ,d_embed=self.d_embed ,n_head=self.num_attention_heads ,d_head=self.d_head ,d_inner=self.d_inner ,div_val=self.div_val ,n_layer=self.num_hidden_layers ,eos_token_id=self.eos_token_id ,pad_token_id=self.vocab_size - 1 ,init_range=self.init_range ,num_labels=self.num_labels ,)
        return (config, input_ids_a, input_ids_a, lm_labels)
    def __lowerCAmelCase ( self : Union[str, Any] ):
        # Seed python and TF RNGs for reproducible model initialization.
        random.seed(self.seed )
        tf.random.set_seed(self.seed )
    def __lowerCAmelCase ( self : Any ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : Any ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Optional[Any] ):
        # Base model: check hidden-state shapes and that the returned mems carry over.
        UpperCAmelCase__ = TFTransfoXLModel(lowerCamelCase__ )
        UpperCAmelCase__ , UpperCAmelCase__ = model(lowerCamelCase__ ).to_tuple()
        UpperCAmelCase__ = {'input_ids': input_ids_a, 'mems': mems_a}
        UpperCAmelCase__ , UpperCAmelCase__ = model(lowerCamelCase__ ).to_tuple()
        self.parent.assertEqual(hidden_states_a.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(hidden_states_a.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,)
        self.parent.assertListEqual(
            [mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,)
    def __lowerCAmelCase ( self : List[Any] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : Any ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : Tuple ):
        # LM head: exercise all input signatures (ids, ids+mems, with/without labels).
        UpperCAmelCase__ = TFTransfoXLLMHeadModel(lowerCamelCase__ )
        UpperCAmelCase__ , UpperCAmelCase__ = model(lowerCamelCase__ ).to_tuple()
        UpperCAmelCase__ = {'input_ids': input_ids_a, 'labels': lm_labels}
        UpperCAmelCase__ , UpperCAmelCase__ = model(lowerCamelCase__ ).to_tuple()
        UpperCAmelCase__ , UpperCAmelCase__ = model([input_ids_a, mems_a] ).to_tuple()
        UpperCAmelCase__ = {'input_ids': input_ids_a, 'mems': mems_a, 'labels': lm_labels}
        UpperCAmelCase__ , UpperCAmelCase__ = model(lowerCamelCase__ ).to_tuple()
        self.parent.assertEqual(lm_logits_a.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,)
        self.parent.assertEqual(lm_logits_a.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,)
    def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : Any ):
        # Sequence classification head: logits must be (batch_size, num_labels).
        UpperCAmelCase__ = TFTransfoXLForSequenceClassification(lowerCamelCase__ )
        UpperCAmelCase__ = model(lowerCamelCase__ )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
    def __lowerCAmelCase ( self : List[Any] ):
        # Adapt prepare_config_and_inputs() output to the common-test dict format.
        UpperCAmelCase__ = self.prepare_config_and_inputs()
        ((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) = config_and_inputs
        UpperCAmelCase__ = {'input_ids': input_ids_a}
        return config, inputs_dict
@require_tf
class snake_case ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
    """TF TransfoXL model tests, driven by the tester class above plus the shared
    config/pipeline test mixins.

    NOTE(review): the repeated base class ``__UpperCAmelCase`` and the repeated class
    attribute name ``snake_case__`` look like mechanical-rename artifacts (upstream these
    are ``TFModelTesterMixin``/``PipelineTesterMixin`` and distinct attributes such as
    ``all_model_classes``); confirm before relying on runtime behavior.
    """
    snake_case__ = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    snake_case__ = () if is_tf_available() else ()
    snake_case__ = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    snake_case__ = False
    snake_case__ = False
    snake_case__ = False
    snake_case__ = False
    def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : Dict ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : Dict ,lowerCamelCase__ : str ):
        # Skip pipeline test cases that are known-incompatible with TransfoXL.
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True
        return False
    def __lowerCAmelCase ( self : Optional[Any] ):
        UpperCAmelCase__ = TFTransfoXLModelTester(self )
        UpperCAmelCase__ = ConfigTester(self ,config_class=lowerCamelCase__ ,d_embed=37 )
    def __lowerCAmelCase ( self : int ):
        self.config_tester.run_common_tests()
    def __lowerCAmelCase ( self : Tuple ):
        self.model_tester.set_seed()
        UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*lowerCamelCase__ )
    def __lowerCAmelCase ( self : int ):
        self.model_tester.set_seed()
        UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*lowerCamelCase__ )
    def __lowerCAmelCase ( self : Optional[Any] ):
        UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*lowerCamelCase__ )
    def __lowerCAmelCase ( self : Tuple ):
        # Check input/output embedding accessors: only the classification head exposes
        # output embeddings (and no bias); all other classes return None for both.
        UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCAmelCase__ = [TFTransfoXLForSequenceClassification]
        for model_class in self.all_model_classes:
            UpperCAmelCase__ = model_class(lowerCamelCase__ )
            assert isinstance(model.get_input_embeddings() ,tf.keras.layers.Layer )
            if model_class in list_other_models_with_output_ebd:
                UpperCAmelCase__ = model.get_output_embeddings()
                assert isinstance(lowerCamelCase__ ,tf.keras.layers.Layer )
                UpperCAmelCase__ = model.get_bias()
                assert name is None
            else:
                UpperCAmelCase__ = model.get_output_embeddings()
                assert x is None
                UpperCAmelCase__ = model.get_bias()
                assert name is None
    def __lowerCAmelCase ( self : List[str] ):
        # TODO JP: Make TransfoXL XLA compliant
        pass
    @slow
    def __lowerCAmelCase ( self : Optional[Any] ):
        # Smoke-test loading a pretrained checkpoint from the hub.
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase__ = TFTransfoXLModel.from_pretrained(lowerCamelCase__ )
            self.assertIsNotNone(lowerCamelCase__ )
    @unittest.skip(reason='This model doesn\'t play well with fit() due to not returning a single loss.' )
    def __lowerCAmelCase ( self : str ):
        pass
@require_tf
class snake_case ( unittest.TestCase ):
    """Slow integration test: greedy generation with the pretrained ``transfo-xl-wt103``
    checkpoint must reproduce a pinned token-id sequence.

    NOTE(review): locals are assigned to ``UpperCAmelCase__`` but read back under other
    names (``model``, ``input_ids``, ``expected_output_ids``, ``output_ids``) — a
    mechanical-rename artifact; confirm against the upstream test before relying on it.
    """
    @unittest.skip('Skip test until #12651 is resolved.' )
    @slow
    def __lowerCAmelCase ( self : Optional[Any] ):
        UpperCAmelCase__ = TFTransfoXLLMHeadModel.from_pretrained('transfo-xl-wt103' )
        # fmt: off
        UpperCAmelCase__ = tf.convert_to_tensor([[33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0]] ,dtype=tf.intaa ) # noqa: E231
        # fmt: on
        # In 1991 , the remains of Russian Tsar Nicholas II and his family
        # ( except for Alexei and Maria ) are discovered .
        # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
        # remainder of the story . 1883 Western Siberia ,
        # a young Grigori Rasputin is asked by his father and a group of men to perform magic .
        # Rasputin has a vision and denounces one of the men as a horse thief . Although his
        # father initially slaps him for making such an accusation , Rasputin watches as the
        # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
        # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
        # with people , even a bishop , begging for his blessing . <eod> </s> <eos>
        # fmt: off
        UpperCAmelCase__ = [33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0,33,1,1_857,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,28,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,0] # noqa: E231
        # fmt: on
        # In 1991, the remains of Russian Tsar Nicholas II and his family (
        # except for Alexei and Maria ) are discovered. The voice of young son,
        # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
        # 1883 Western Siberia, a young Grigori Rasputin is asked by his father
        # and a group of men to perform magic. Rasputin has a vision and
        # denounces one of the men as a horse thief. Although his father initially
        # slaps him for making such an accusation, Rasputin watches as the man
        # is chased outside and beaten. Twenty years later, Rasputin sees a vision
        # of the Virgin Mary, prompting him to become a priest.
        # Rasputin quickly becomes famous, with people, even a bishop, begging for
        # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
        # Nicholas II and his family were discovered. The voice of <unk> young son,
        # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        # Greedy decoding (do_sample disabled) must match the pinned continuation above.
        UpperCAmelCase__ = model.generate(lowerCamelCase__ ,max_length=200 ,do_sample=lowerCamelCase__ )
        self.assertListEqual(output_ids[0].numpy().tolist() ,lowerCamelCase__ )
| 98 | """simple docstring"""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
lowerCAmelCase__ : List[Any] = '\\n\n'
lowerCAmelCase__ : Tuple = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
lowerCAmelCase__ : str = '\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... 
input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case ( datasets.Metric ):
    """Perplexity metric.

    Scores a list of input texts with a causal language model and returns the
    per-text perplexities together with their mean.

    NOTE(review): identifiers in this file are obfuscated; the local
    assignments bind a dummy name while later lines read the intended names
    (``device``, ``model``, ``tokenizer``, ``batch_size``, ...) — confirm
    against the original `datasets` perplexity metric before relying on it.
    """
    def __lowerCAmelCase ( self : str ):
        # Metric metadata: a single string feature, 'input_texts', per example.
        return datasets.MetricInfo(
            description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
                {
                    'input_texts': datasets.Value('string' ),
                } ) ,reference_urls=['https://huggingface.co/docs/transformers/perplexity'] ,)
    def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : int = 16 ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : List[str]=None ):
        # Resolve the execution device; "gpu" is accepted as an alias of "cuda".
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                UpperCAmelCase__ = 'cuda'
        else:
            UpperCAmelCase__ = 'cuda' if torch.cuda.is_available() else 'cpu'
        # Load the causal LM and its tokenizer from the given model id.
        UpperCAmelCase__ = AutoModelForCausalLM.from_pretrained(lowerCamelCase__ )
        UpperCAmelCase__ = model.to(lowerCamelCase__ )
        UpperCAmelCase__ = AutoTokenizer.from_pretrained(lowerCamelCase__ )
        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            UpperCAmelCase__ = list(tokenizer.special_tokens_map_extended.values() )
            # check that the model already has at least one special token defined
            assert (
                len(lowerCamelCase__ ) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            # NOTE(review): `existing_special_tokens` is never bound under the
            # obfuscated assignments above — confirm against the original.
            tokenizer.add_special_tokens({'pad_token': existing_special_tokens[0]} )
        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            UpperCAmelCase__ = model.config.max_length - 1
        else:
            UpperCAmelCase__ = model.config.max_length
        # Tokenize all texts at once with padding/truncation to max length.
        UpperCAmelCase__ = tokenizer(
        lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ,padding=lowerCamelCase__ ,truncation=lowerCamelCase__ ,max_length=lowerCamelCase__ ,return_tensors='pt' ,return_attention_mask=lowerCamelCase__ ,).to(lowerCamelCase__ )
        UpperCAmelCase__ = encodings['input_ids']
        UpperCAmelCase__ = encodings['attention_mask']
        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1 ) ,1 ) ), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1 ) ,2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
        UpperCAmelCase__ = []
        UpperCAmelCase__ = CrossEntropyLoss(reduction='none' )
        # Score the texts batch by batch.
        for start_index in logging.tqdm(range(0 ,len(lowerCamelCase__ ) ,lowerCamelCase__ ) ):
            UpperCAmelCase__ = min(start_index + batch_size ,len(lowerCamelCase__ ) )
            UpperCAmelCase__ = encoded_texts[start_index:end_index]
            UpperCAmelCase__ = attn_masks[start_index:end_index]
            if add_start_token:
                # Prepend a BOS column to both the ids and the attention mask.
                # NOTE(review): `torch.intaa` below looks like a mangled
                # `torch.int64` — confirm against the original source.
                UpperCAmelCase__ = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(lowerCamelCase__ )
                UpperCAmelCase__ = torch.cat([bos_tokens_tensor, encoded_batch] ,dim=1 )
                UpperCAmelCase__ = torch.cat(
                [torch.ones(bos_tokens_tensor.size() ,dtype=torch.intaa ).to(lowerCamelCase__ ), attn_mask] ,dim=1 )
            UpperCAmelCase__ = encoded_batch
            with torch.no_grad():
                UpperCAmelCase__ = model(lowerCamelCase__ ,attention_mask=lowerCamelCase__ ).logits
            # Shift logits and labels by one position for next-token prediction.
            UpperCAmelCase__ = out_logits[..., :-1, :].contiguous()
            UpperCAmelCase__ = labels[..., 1:].contiguous()
            UpperCAmelCase__ = attn_mask[..., 1:].contiguous()
            # Perplexity = exp(mean masked token NLL).
            # NOTE(review): `torch.expa` looks like a mangled `torch.exp` — confirm.
            UpperCAmelCase__ = torch.expa(
            (loss_fct(shift_logits.transpose(1 ,2 ) ,lowerCamelCase__ ) * shift_attention_mask_batch).sum(1 )
            / shift_attention_mask_batch.sum(1 ) )
            ppls += perplexity_batch.tolist()
        return {"perplexities": ppls, "mean_perplexity": np.mean(lowerCamelCase__ )}
| 98 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class snake_case ( __UpperCAmelCase ):
    """Test callback that appends the name of every Trainer event it receives
    to ``self.events``, so tests can assert on the exact event sequence.

    NOTE(review): the ``__init__`` assignment is obfuscated; it presumably
    initializes ``self.events`` to an empty list — confirm against the
    original transformers test source.
    """
    def __init__( self : Dict ):
        UpperCAmelCase__ = []
    # Each handler below records its event name; the unused positional and
    # keyword arguments mirror the TrainerCallback hook signatures.
    def __lowerCAmelCase ( self : int ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : List[Any] ,**lowerCamelCase__ : int ):
        self.events.append('on_init_end' )
    def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : Any ,**lowerCamelCase__ : Optional[Any] ):
        self.events.append('on_train_begin' )
    def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : int ,**lowerCamelCase__ : int ):
        self.events.append('on_train_end' )
    def __lowerCAmelCase ( self : str ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : Dict ,lowerCamelCase__ : Any ,**lowerCamelCase__ : Optional[Any] ):
        self.events.append('on_epoch_begin' )
    def __lowerCAmelCase ( self : str ,lowerCamelCase__ : Dict ,lowerCamelCase__ : str ,lowerCamelCase__ : Dict ,**lowerCamelCase__ : List[Any] ):
        self.events.append('on_epoch_end' )
    def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : str ,lowerCamelCase__ : Union[str, Any] ,**lowerCamelCase__ : Union[str, Any] ):
        self.events.append('on_step_begin' )
    def __lowerCAmelCase ( self : str ,lowerCamelCase__ : Any ,lowerCamelCase__ : Any ,lowerCamelCase__ : List[str] ,**lowerCamelCase__ : Optional[Any] ):
        self.events.append('on_step_end' )
    def __lowerCAmelCase ( self : Optional[int] ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : List[str] ,**lowerCamelCase__ : List[str] ):
        self.events.append('on_evaluate' )
    def __lowerCAmelCase ( self : Any ,lowerCamelCase__ : int ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : Union[str, Any] ,**lowerCamelCase__ : Union[str, Any] ):
        self.events.append('on_predict' )
    def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : Dict ,lowerCamelCase__ : Any ,**lowerCamelCase__ : Optional[Any] ):
        self.events.append('on_save' )
    def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : Optional[int] ,**lowerCamelCase__ : Union[str, Any] ):
        self.events.append('on_log' )
    def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Any ,lowerCamelCase__ : List[Any] ,**lowerCamelCase__ : Optional[int] ):
        self.events.append('on_prediction_step' )
@require_torch
class snake_case ( unittest.TestCase ):
    """Tests for the Trainer callback mechanism: default callback wiring,
    add/remove/pop of callbacks, and the exact sequence of callback events
    fired during training and evaluation.

    NOTE(review): local names are obfuscated throughout — the assignments
    bind a dummy name while later lines read the intended names
    (``trainer``, ``expected_callbacks``, ``expected_events``, ``events``,
    ...); confirm against the original transformers test file.
    """
    def __lowerCAmelCase ( self : Union[str, Any] ):
        # setUp: create a scratch output directory for each test.
        UpperCAmelCase__ = tempfile.mkdtemp()
    def __lowerCAmelCase ( self : int ):
        # tearDown: remove the scratch directory.
        shutil.rmtree(self.output_dir )
    def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : Optional[Any]=0 ,lowerCamelCase__ : str=0 ,lowerCamelCase__ : List[str]=64 ,lowerCamelCase__ : Tuple=64 ,lowerCamelCase__ : Union[str, Any]=None ,lowerCamelCase__ : Any=False ,**lowerCamelCase__ : List[Any] ):
        # Build a small regression Trainer; extra kwargs go to TrainingArguments.
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # its set to False since the tests later on depend on its value.
        UpperCAmelCase__ = RegressionDataset(length=lowerCamelCase__ )
        UpperCAmelCase__ = RegressionDataset(length=lowerCamelCase__ )
        UpperCAmelCase__ = RegressionModelConfig(a=lowerCamelCase__ ,b=lowerCamelCase__ )
        UpperCAmelCase__ = RegressionPreTrainedModel(lowerCamelCase__ )
        UpperCAmelCase__ = TrainingArguments(self.output_dir ,disable_tqdm=lowerCamelCase__ ,report_to=[] ,**lowerCamelCase__ )
        return Trainer(
        lowerCamelCase__ ,lowerCamelCase__ ,train_dataset=lowerCamelCase__ ,eval_dataset=lowerCamelCase__ ,callbacks=lowerCamelCase__ ,)
    def __lowerCAmelCase ( self : Optional[int] ,lowerCamelCase__ : Dict ,lowerCamelCase__ : List[str] ):
        # Assert two callback lists are equal up to ordering; entries may be
        # classes or instances, so comparison normalizes on the class name.
        self.assertEqual(len(lowerCamelCase__ ) ,len(lowerCamelCase__ ) )
        # Order doesn't matter
        UpperCAmelCase__ = sorted(lowerCamelCase__ ,key=lambda lowerCamelCase__ : cb.__name__ if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) else cb.__class__.__name__ )
        UpperCAmelCase__ = sorted(lowerCamelCase__ ,key=lambda lowerCamelCase__ : cb.__name__ if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) else cb.__class__.__name__ )
        for cba, cba in zip(lowerCamelCase__ ,lowerCamelCase__ ):
            if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) and isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
                self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ )
            elif isinstance(lowerCamelCase__ ,lowerCamelCase__ ) and not isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
                self.assertEqual(lowerCamelCase__ ,cba.__class__ )
            elif not isinstance(lowerCamelCase__ ,lowerCamelCase__ ) and isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
                self.assertEqual(cba.__class__ ,lowerCamelCase__ )
            else:
                self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ )
    def __lowerCAmelCase ( self : List[str] ,lowerCamelCase__ : Optional[int] ):
        # Reconstruct the event sequence the Trainer should have fired given
        # its logging/eval/save configuration.
        UpperCAmelCase__ = ['on_init_end', 'on_train_begin']
        UpperCAmelCase__ = 0
        UpperCAmelCase__ = len(trainer.get_eval_dataloader() )
        UpperCAmelCase__ = ['on_prediction_step'] * len(trainer.get_eval_dataloader() ) + ['on_log', 'on_evaluate']
        for _ in range(trainer.state.num_train_epochs ):
            expected_events.append('on_epoch_begin' )
            for _ in range(lowerCamelCase__ ):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append('on_log' )
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append('on_save' )
            expected_events.append('on_epoch_end' )
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
    def __lowerCAmelCase ( self : Union[str, Any] ):
        # Default callbacks are installed; callbacks passed at init are added;
        # disable_tqdm swaps ProgressCallback for PrinterCallback.
        UpperCAmelCase__ = self.get_trainer()
        UpperCAmelCase__ = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowerCamelCase__ )
        # Callbacks passed at init are added to the default callbacks
        UpperCAmelCase__ = self.get_trainer(callbacks=[MyTestTrainerCallback] )
        expected_callbacks.append(lowerCamelCase__ )
        self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowerCamelCase__ )
        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        UpperCAmelCase__ = self.get_trainer(disable_tqdm=lowerCamelCase__ )
        UpperCAmelCase__ = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowerCamelCase__ )
    def __lowerCAmelCase ( self : Optional[int] ):
        # Callbacks can be added, popped, or removed either by class or by
        # instance; the handler list must track each operation.
        UpperCAmelCase__ = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        UpperCAmelCase__ = self.get_trainer()
        # We can add, pop, or remove by class name
        trainer.remove_callback(lowerCamelCase__ )
        expected_callbacks.remove(lowerCamelCase__ )
        self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowerCamelCase__ )
        UpperCAmelCase__ = self.get_trainer()
        UpperCAmelCase__ = trainer.pop_callback(lowerCamelCase__ )
        self.assertEqual(cb.__class__ ,lowerCamelCase__ )
        self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowerCamelCase__ )
        trainer.add_callback(lowerCamelCase__ )
        expected_callbacks.insert(0 ,lowerCamelCase__ )
        self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowerCamelCase__ )
        # We can also add, pop, or remove by instance
        UpperCAmelCase__ = self.get_trainer()
        UpperCAmelCase__ = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(lowerCamelCase__ )
        expected_callbacks.remove(lowerCamelCase__ )
        self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowerCamelCase__ )
        UpperCAmelCase__ = self.get_trainer()
        UpperCAmelCase__ = trainer.callback_handler.callbacks[0]
        UpperCAmelCase__ = trainer.pop_callback(lowerCamelCase__ )
        self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ )
        self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowerCamelCase__ )
        trainer.add_callback(lowerCamelCase__ )
        expected_callbacks.insert(0 ,lowerCamelCase__ )
        self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowerCamelCase__ )
    def __lowerCAmelCase ( self : Tuple ):
        # Train under several logging/eval/save configurations and compare the
        # recorded events with the reconstructed expected sequence.
        import warnings
        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action='ignore' ,category=lowerCamelCase__ )
        UpperCAmelCase__ = self.get_trainer(callbacks=[MyTestTrainerCallback] )
        trainer.train()
        UpperCAmelCase__ = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(lowerCamelCase__ ,self.get_expected_events(lowerCamelCase__ ) )
        # Independent log/save/eval
        UpperCAmelCase__ = self.get_trainer(callbacks=[MyTestTrainerCallback] ,logging_steps=5 )
        trainer.train()
        UpperCAmelCase__ = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(lowerCamelCase__ ,self.get_expected_events(lowerCamelCase__ ) )
        UpperCAmelCase__ = self.get_trainer(callbacks=[MyTestTrainerCallback] ,save_steps=5 )
        trainer.train()
        UpperCAmelCase__ = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(lowerCamelCase__ ,self.get_expected_events(lowerCamelCase__ ) )
        UpperCAmelCase__ = self.get_trainer(callbacks=[MyTestTrainerCallback] ,eval_steps=5 ,evaluation_strategy='steps' )
        trainer.train()
        UpperCAmelCase__ = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(lowerCamelCase__ ,self.get_expected_events(lowerCamelCase__ ) )
        UpperCAmelCase__ = self.get_trainer(callbacks=[MyTestTrainerCallback] ,evaluation_strategy='epoch' )
        trainer.train()
        UpperCAmelCase__ = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(lowerCamelCase__ ,self.get_expected_events(lowerCamelCase__ ) )
        # A bit of everything
        UpperCAmelCase__ = self.get_trainer(
            callbacks=[MyTestTrainerCallback] ,logging_steps=3 ,save_steps=10 ,eval_steps=5 ,evaluation_strategy='steps' ,)
        trainer.train()
        UpperCAmelCase__ = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(lowerCamelCase__ ,self.get_expected_events(lowerCamelCase__ ) )
        # warning should be emitted for duplicated callbacks
        with patch('transformers.trainer_callback.logger.warning' ) as warn_mock:
            UpperCAmelCase__ = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] ,)
            assert str(lowerCamelCase__ ) in warn_mock.call_args[0][0]
| 98 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
lowerCAmelCase__ : int = logging.get_logger(__name__)
# Map of pretrained XGLM checkpoints to their hosted config files.
lowerCAmelCase__ : str = {
    'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/config.json',
    # See all XGLM models at https://huggingface.co/models?filter=xglm
}
class snake_case ( __UpperCAmelCase ):
    """Configuration class for an XGLM model (model_type "xglm").

    NOTE(review): the assignments in ``__init__`` are obfuscated; each one
    presumably sets the instance attribute of the same name
    (``self.vocab_size = vocab_size``, ...) — confirm against the original
    transformers source.
    """
    snake_case__ = "xglm"
    snake_case__ = ["past_key_values"]
    # Map standard config attribute names onto XGLM's own parameter names.
    snake_case__ = {
        "num_attention_heads": "attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "num_layers",
    }
    def __init__( self : Any ,lowerCamelCase__ : Any=256_008 ,lowerCamelCase__ : Optional[Any]=2_048 ,lowerCamelCase__ : List[str]=1_024 ,lowerCamelCase__ : List[str]=4_096 ,lowerCamelCase__ : Tuple=24 ,lowerCamelCase__ : Optional[int]=16 ,lowerCamelCase__ : int="gelu" ,lowerCamelCase__ : Dict=0.1 ,lowerCamelCase__ : int=0.1 ,lowerCamelCase__ : List[Any]=0.0 ,lowerCamelCase__ : List[str]=0.0 ,lowerCamelCase__ : Optional[Any]=0.0_2 ,lowerCamelCase__ : List[str]=True ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : str=2 ,lowerCamelCase__ : Dict=1 ,lowerCamelCase__ : Optional[int]=0 ,lowerCamelCase__ : Tuple=2 ,**lowerCamelCase__ : List[Any] ,):
        UpperCAmelCase__ = vocab_size
        UpperCAmelCase__ = max_position_embeddings
        UpperCAmelCase__ = d_model
        UpperCAmelCase__ = ffn_dim
        UpperCAmelCase__ = num_layers
        UpperCAmelCase__ = attention_heads
        UpperCAmelCase__ = activation_function
        UpperCAmelCase__ = dropout
        UpperCAmelCase__ = attention_dropout
        UpperCAmelCase__ = activation_dropout
        UpperCAmelCase__ = layerdrop
        UpperCAmelCase__ = init_std
        UpperCAmelCase__ = scale_embedding  # scale factor will be sqrt(d_model) if True
        UpperCAmelCase__ = use_cache
        # Forward special-token ids and remaining kwargs to the base config.
        super().__init__(
        pad_token_id=lowerCamelCase__ ,bos_token_id=lowerCamelCase__ ,eos_token_id=lowerCamelCase__ ,decoder_start_token_id=lowerCamelCase__ ,**lowerCamelCase__ ,)
| 98 | 1 |
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
# Enable INFO-level logging and create the module-level logger.
logging.set_verbosity_info()
lowerCAmelCase__ : List[str] = logging.get_logger()
@dataclass
class snake_case :
    """Records the leaf modules traversed during one forward pass.

    A forward hook is registered on every submodule; the hook keeps modules
    that have no children (or are conv / batch-norm layers) in ``self.traced``.

    NOTE(review): names are obfuscated — ``_forward_hook`` (used in
    ``__call__``), ``has_not_submodules``, and ``nn.Convad``/``nn.BatchNormad``
    (presumably ``nn.Conv2d``/``nn.BatchNorm2d``) do not resolve as written;
    confirm against the original conversion script.
    """
    snake_case__ = 42
    snake_case__ = field(default_factory=__UpperCAmelCase )
    snake_case__ = field(default_factory=__UpperCAmelCase )
    def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : int ,lowerCamelCase__ : Tensor ,lowerCamelCase__ : Tensor ):
        # Hook body: keep only modules with no submodules, or conv/batch-norm.
        UpperCAmelCase__ = len(list(m.modules() ) ) == 1 or isinstance(lowerCamelCase__ ,nn.Convad ) or isinstance(lowerCamelCase__ ,nn.BatchNormad )
        if has_not_submodules:
            self.traced.append(lowerCamelCase__ )
    def __call__( self : str ,lowerCamelCase__ : Tensor ):
        # Register the hook on every submodule, run one forward pass, then
        # remove all hooks again and return self for chaining.
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook ) )
        self.module(lowerCamelCase__ )
        [x.remove() for x in self.handles]
        return self
    @property
    def __lowerCAmelCase ( self : Any ):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda lowerCamelCase__ : len(list(x.state_dict().keys() ) ) > 0 ,self.traced ) )
@dataclass
class snake_case :
    """Transfers weights from a source module to a destination module.

    Both modules are traced with the same input (see the tracker dataclass
    above); the parametrized operations found in each trace are filtered by
    the skip lists, matched pairwise in order, and each source op's state
    dict is loaded into the corresponding destination op.
    """
    snake_case__ = 42
    snake_case__ = 42
    snake_case__ = 0
    snake_case__ = field(default_factory=__UpperCAmelCase )
    snake_case__ = field(default_factory=__UpperCAmelCase )
    def __call__( self : Optional[Any] ,lowerCamelCase__ : Tensor ):
        # Trace both modules with the same input; raise if the op counts differ.
        UpperCAmelCase__ = Tracker(self.dest )(lowerCamelCase__ ).parametrized
        UpperCAmelCase__ = Tracker(self.src )(lowerCamelCase__ ).parametrized
        UpperCAmelCase__ = list(filter(lambda lowerCamelCase__ : type(lowerCamelCase__ ) not in self.src_skip ,lowerCamelCase__ ) )
        UpperCAmelCase__ = list(filter(lambda lowerCamelCase__ : type(lowerCamelCase__ ) not in self.dest_skip ,lowerCamelCase__ ) )
        if len(lowerCamelCase__ ) != len(lowerCamelCase__ ):
            raise Exception(
                f'''Numbers of operations are different. Source module has {len(lowerCamelCase__ )} operations while'''
                f''' destination module has {len(lowerCamelCase__ )}.''' )
        # Copy weights op-by-op, optionally logging each transfer.
        for dest_m, src_m in zip(lowerCamelCase__ ,lowerCamelCase__ ):
            dest_m.load_state_dict(src_m.state_dict() )
            if self.verbose == 1:
                print(f'''Transfered from={src_m} to={dest_m}''' )
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = True ):
    """Convert one timm ResNet checkpoint into a HF ResNetForImageClassification,
    verify the logits match, and optionally push model + image processor to the Hub.

    NOTE(review): parameter/local names are obfuscated; the body reads
    ``name``, ``from_model``, ``our_model``, ``save_directory``,
    ``checkpoint_name``, ``push_to_hub`` which the dummy assignments never
    bind — confirm against the original conversion script.
    """
    print(f'''Converting {name}...''' )
    with torch.no_grad():
        # Instantiate the pretrained timm model and an untrained HF model,
        # then copy the weights across via ModuleTransfer.
        UpperCAmelCase__ = timm.create_model(lowerCamelCase , pretrained=lowerCamelCase ).eval()
        UpperCAmelCase__ = ResNetForImageClassification(lowerCamelCase ).eval()
        UpperCAmelCase__ = ModuleTransfer(src=lowerCamelCase , dest=lowerCamelCase )
    # Trace/transfer on a dummy ImageNet-sized input.
    UpperCAmelCase__ = torch.randn((1, 3, 2_2_4, 2_2_4) )
    module_transfer(lowerCamelCase )
    # Sanity check: the converted model must reproduce the original logits.
    assert torch.allclose(from_model(lowerCamelCase ) , our_model(lowerCamelCase ).logits ), "The model logits don't match the original one."
    UpperCAmelCase__ = f'''resnet{"-".join(name.split("resnet" ) )}'''
    print(lowerCamelCase )
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message='Add model' , use_temp_dir=lowerCamelCase , )
        # we can use the convnext one
        UpperCAmelCase__ = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message='Add image processor' , use_temp_dir=lowerCamelCase , )
        print(f'''Pushed {checkpoint_name}''' )
def a_ ( lowerCamelCase , lowerCamelCase = None , lowerCamelCase = True ):
    """Build HF ResNet configs for every supported depth and convert either a
    single named checkpoint or all known ones.

    NOTE(review): names are obfuscated; the body reads ``num_labels``,
    ``idalabel``, ``names_to_config``, ``model_name``, ``config``,
    ``expected_shape`` which the dummy assignments never bind — confirm
    against the original conversion script.
    """
    # ImageNet-1k label metadata, fetched from the HF hub.
    UpperCAmelCase__ = 'imagenet-1k-id2label.json'
    UpperCAmelCase__ = 1_0_0_0
    UpperCAmelCase__ = (1, num_labels)
    UpperCAmelCase__ = 'huggingface/label-files'
    UpperCAmelCase__ = num_labels
    UpperCAmelCase__ = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type='dataset' ) , 'r' ) )
    UpperCAmelCase__ = {int(lowerCamelCase ): v for k, v in idalabel.items()}
    UpperCAmelCase__ = idalabel
    UpperCAmelCase__ = {v: k for k, v in idalabel.items()}
    # Pre-fill the config constructor with the label maps.
    UpperCAmelCase__ = partial(lowerCamelCase , num_labels=lowerCamelCase , idalabel=lowerCamelCase , labelaid=lowerCamelCase )
    # One config per supported ResNet depth; basic blocks for 18/34,
    # bottleneck blocks for the deeper variants.
    UpperCAmelCase__ = {
        'resnet18': ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type='basic' ),
        'resnet26': ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='bottleneck' ),
        'resnet34': ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type='basic' ),
        'resnet50': ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='bottleneck' ),
        'resnet101': ImageNetPreTrainedConfig(
            depths=[3, 4, 2_3, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='bottleneck' ),
        'resnet152': ImageNetPreTrainedConfig(
            depths=[3, 8, 3_6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='bottleneck' ),
    }
    # Convert a single named model, or every known one.
    if model_name:
        convert_weight_and_push(lowerCamelCase , names_to_config[model_name] , lowerCamelCase , lowerCamelCase )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
    return config, expected_shape
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the checkpoint conversion.
    # NOTE(review): obfuscated — the parser/args are bound to dummy names but
    # read back as `parser`, `args`, `pytorch_dump_folder_path`, and the call
    # target `convert_weights_and_push` does not exist under the mangled
    # function names; confirm against the original script.
    lowerCAmelCase__ : Tuple = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default=None,
        type=str,
        help=(
            'The name of the model you wish to convert, it must be one of the supported resnet* architecture,'
            ' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default=None,
        type=Path,
        required=True,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument(
        '--push_to_hub',
        default=True,
        type=bool,
        required=False,
        help='If True, push model and image processor to the hub.',
    )
    lowerCAmelCase__ : List[Any] = parser.parse_args()
    lowerCAmelCase__ : Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 98 | """simple docstring"""
import math
def a_ ( apparent_power , power_factor ):
    """Return the real (active) power of an AC circuit.

    Real power = apparent_power * power_factor.

    Args:
        apparent_power: apparent power of the circuit (volt-amperes).
        power_factor: cosine of the phase angle; must lie in [-1, 1].

    Returns:
        The real power (watts).

    Raises:
        ValueError: if ``power_factor`` is not a number in [-1, 1].
    """
    # Fix: the original signature declared the same name for both parameters
    # (a SyntaxError in Python) and the validation read a name that was never
    # bound; the parameters are now distinct and the check targets power_factor.
    if (
        not isinstance(power_factor , (int, float) )
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('power_factor must be a valid float value between -1 and 1.' )
    return apparent_power * power_factor
def a_ ( apparent_power , power_factor ):
    """Return the reactive power of an AC circuit.

    Reactive power = apparent_power * sqrt(1 - power_factor**2).

    Args:
        apparent_power: apparent power of the circuit (volt-amperes).
        power_factor: cosine of the phase angle; must lie in [-1, 1].

    Returns:
        The reactive power (volt-amperes reactive).

    Raises:
        ValueError: if ``power_factor`` is not a number in [-1, 1].
    """
    # Fix: the original signature declared the same name for both parameters
    # (a SyntaxError in Python); the parameters are now distinct and the
    # validation targets power_factor as intended.
    if (
        not isinstance(power_factor , (int, float) )
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('power_factor must be a valid float value between -1 and 1.' )
    return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed as a script.
    import doctest
    doctest.testmod()
"""simple docstring"""
from __future__ import annotations
def a_ ( a , b ):
    """Extended Euclidean algorithm.

    Returns a pair ``(x, y)`` such that ``a * x + b * y == gcd(a, b)``.

    Args:
        a: first non-negative integer.
        b: second non-negative integer.

    Returns:
        Tuple ``(x, y)`` of Bezout coefficients.
    """
    # Fix: the original declared the same name for both parameters (a
    # SyntaxError), read names that were never bound, and recursed via an
    # undefined `extended_euclid`; the recursion now targets this function.
    # Base case: gcd(a, 0) = a = a * 1 + 0 * 0.
    if b == 0:
        return (1, 0)
    # Standard recursive step on (b, a % b).
    (x, y) = a_(b , a % b )
    k = a // b
    return (y, x - k * y)
def a_ ( na , ra , nb , rb ):
    """Chinese remainder theorem for two congruences.

    Returns the unique ``n`` in ``[0, na * nb)`` with ``n % na == ra`` and
    ``n % nb == rb``; ``na`` and ``nb`` must be coprime.

    Args:
        na: first modulus.
        ra: residue modulo ``na``.
        nb: second modulus (coprime with ``na``).
        rb: residue modulo ``nb``.
    """
    # Fix: the original declared four identical parameter names (a
    # SyntaxError) and called an undefined `extended_euclid`; a private
    # helper now provides the Bezout coefficients.
    def _extended_euclid(a , b ):
        # Helper: (x, y) with a * x + b * y == gcd(a, b).
        if b == 0:
            return (1, 0)
        (x, y) = _extended_euclid(b , a % b )
        k = a // b
        return (y, x - k * y)

    (x, y) = _extended_euclid(na , nb )
    m = na * nb
    n = rb * x * na + ra * y * nb
    # Normalize the result into [0, m).
    return (n % m + m) % m
def a_ ( a , n ):
    """Return the multiplicative inverse of ``a`` modulo ``n``.

    ``a`` and ``n`` must be coprime for the inverse to exist.

    Args:
        a: value to invert.
        n: modulus.

    Returns:
        ``b`` such that ``(a * b) % n == 1``.
    """
    # Fix: the original declared the same name for both parameters (a
    # SyntaxError) and called an undefined `extended_euclid`; a private
    # helper now provides the Bezout coefficients.
    def _extended_euclid(x , y ):
        # Helper: (p, q) with x * p + y * q == gcd(x, y).
        if y == 0:
            return (1, 0)
        (p, q) = _extended_euclid(y , x % y )
        k = x // y
        return (q, p - k * q)

    (b, x) = _extended_euclid(a , n )
    # Map a negative coefficient into the canonical residue range [0, n).
    if b < 0:
        b = (b % n + n) % n
    return b
def a_ ( na , ra , nb , rb ):
    """Chinese remainder theorem via modular inverses.

    Same contract as the extended-euclid-based variant above: returns the
    unique ``n`` in ``[0, na * nb)`` with ``n % na == ra`` and
    ``n % nb == rb``; ``na`` and ``nb`` must be coprime.

    Args:
        na: first modulus.
        ra: residue modulo ``na``.
        nb: second modulus (coprime with ``na``).
        rb: residue modulo ``nb``.
    """
    # Fix: the original declared four identical parameter names (a
    # SyntaxError) and called an undefined `invert_modulo`; private helpers
    # now provide the inverse computation.
    def _extended_euclid(a , b ):
        # Helper: (x, y) with a * x + b * y == gcd(a, b).
        if b == 0:
            return (1, 0)
        (x, y) = _extended_euclid(b , a % b )
        k = a // b
        return (y, x - k * y)

    def _invert_modulo(a , n ):
        # Helper: multiplicative inverse of a modulo n, in [0, n).
        (b, _) = _extended_euclid(a , n )
        if b < 0:
            b = (b % n + n) % n
        return b

    x, y = _invert_modulo(na , nb ), _invert_modulo(nb , na )
    m = na * nb
    n = rb * x * na + ra * y * nb
    # Normalize the result into [0, m).
    return (n % m + m) % m
if __name__ == "__main__":
    # Run the module's doctests verbosely, once per reported suite name.
    from doctest import testmod
    testmod(name='chinese_remainder_theorem', verbose=True)
    testmod(name='chinese_remainder_theorem2', verbose=True)
    testmod(name='invert_modulo', verbose=True)
    testmod(name='extended_euclid', verbose=True)
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
# UNet key-renaming tables: (stable-diffusion name, HF Diffusers name) pairs.
# NOTE(review): every assignment below binds the same obfuscated module-level
# name, so the identifiers read by the append() calls (sd_down_res_prefix,
# hf_down_res_prefix, ...) are never defined as written — confirm against the
# original convert-diffusers-to-SD script.
lowerCAmelCase__ : Optional[int] = [
    # (stable-diffusion, HF Diffusers)
    ('time_embed.0.weight', 'time_embedding.linear_1.weight'),
    ('time_embed.0.bias', 'time_embedding.linear_1.bias'),
    ('time_embed.2.weight', 'time_embedding.linear_2.weight'),
    ('time_embed.2.bias', 'time_embedding.linear_2.bias'),
    ('input_blocks.0.0.weight', 'conv_in.weight'),
    ('input_blocks.0.0.bias', 'conv_in.bias'),
    ('out.0.weight', 'conv_norm_out.weight'),
    ('out.0.bias', 'conv_norm_out.bias'),
    ('out.2.weight', 'conv_out.weight'),
    ('out.2.bias', 'conv_out.bias'),
]
# Per-resnet-layer renames shared by all blocks.
lowerCAmelCase__ : str = [
    # (stable-diffusion, HF Diffusers)
    ('in_layers.0', 'norm1'),
    ('in_layers.2', 'conv1'),
    ('out_layers.0', 'norm2'),
    ('out_layers.3', 'conv2'),
    ('emb_layers.1', 'time_emb_proj'),
    ('skip_connection', 'conv_shortcut'),
]
lowerCAmelCase__ : int = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        lowerCAmelCase__ : Dict = F"""down_blocks.{i}.resnets.{j}."""
        lowerCAmelCase__ : Union[str, Any] = F"""input_blocks.{3*i + j + 1}.0."""
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
        if i < 3:
            # no attention layers in down_blocks.3
            lowerCAmelCase__ : Tuple = F"""down_blocks.{i}.attentions.{j}."""
            lowerCAmelCase__ : int = F"""input_blocks.{3*i + j + 1}.1."""
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
    for j in range(3):
        # loop over resnets/attentions for upblocks
        lowerCAmelCase__ : Tuple = F"""up_blocks.{i}.resnets.{j}."""
        lowerCAmelCase__ : int = F"""output_blocks.{3*i + j}.0."""
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
        if i > 0:
            # no attention layers in up_blocks.0
            lowerCAmelCase__ : Any = F"""up_blocks.{i}.attentions.{j}."""
            lowerCAmelCase__ : List[str] = F"""output_blocks.{3*i + j}.1."""
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
    if i < 3:
        # no downsample in down_blocks.3
        lowerCAmelCase__ : List[str] = F"""down_blocks.{i}.downsamplers.0.conv."""
        lowerCAmelCase__ : Union[str, Any] = F"""input_blocks.{3*(i+1)}.0.op."""
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
    # no upsample in up_blocks.3
    lowerCAmelCase__ : Dict = F"""up_blocks.{i}.upsamplers.0."""
    lowerCAmelCase__ : List[Any] = F"""output_blocks.{3*i + 2}.{1 if i == 0 else 2}."""
    unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
# Middle-block attention and resnet renames.
lowerCAmelCase__ : str = 'mid_block.attentions.0.'
lowerCAmelCase__ : Union[str, Any] = 'middle_block.1.'
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
    lowerCAmelCase__ : int = F"""mid_block.resnets.{j}."""
    lowerCAmelCase__ : List[str] = F"""middle_block.{2*j}."""
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def a_ ( lowerCamelCase ):
    """Map HF Diffusers UNet state-dict keys to stable-diffusion names and
    return the renamed state dict.

    NOTE(review): obfuscated — the ``v.replace(...)`` results are bound to a
    dummy name instead of rebinding ``v``/``mapping[k]``, and the body reads
    ``unet_state_dict``/``mapping`` which the dummy assignments never bind;
    confirm against the original script.
    """
    # buyer beware: this is a *brittle* function,
    # and correct output requires that all of these pieces interact in
    # the exact order in which I have arranged them.
    UpperCAmelCase__ = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        UpperCAmelCase__ = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                UpperCAmelCase__ = v.replace(lowerCamelCase , lowerCamelCase )
            UpperCAmelCase__ = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            UpperCAmelCase__ = v.replace(lowerCamelCase , lowerCamelCase )
        UpperCAmelCase__ = v
    UpperCAmelCase__ = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
# ================#
# VAE Conversion #
# ================#
# VAE key-renaming tables: (stable-diffusion name, HF Diffusers name) pairs.
# NOTE(review): as with the UNet maps above, the assignments bind the same
# obfuscated name, so the prefixes read by the append() calls are never
# defined as written — confirm against the original script.
lowerCAmelCase__ : str = [
    # (stable-diffusion, HF Diffusers)
    ('nin_shortcut', 'conv_shortcut'),
    ('norm_out', 'conv_norm_out'),
    ('mid.attn_1.', 'mid_block.attentions.0.'),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        lowerCAmelCase__ : List[str] = F"""encoder.down_blocks.{i}.resnets.{j}."""
        lowerCAmelCase__ : List[Any] = F"""encoder.down.{i}.block.{j}."""
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
    if i < 3:
        lowerCAmelCase__ : Dict = F"""down_blocks.{i}.downsamplers.0."""
        lowerCAmelCase__ : str = F"""down.{i}.downsample."""
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
        lowerCAmelCase__ : int = F"""up_blocks.{i}.upsamplers.0."""
        lowerCAmelCase__ : str = F"""up.{3-i}.upsample."""
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        lowerCAmelCase__ : Dict = F"""decoder.up_blocks.{i}.resnets.{j}."""
        lowerCAmelCase__ : Optional[int] = F"""decoder.up.{3-i}.block.{j}."""
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    lowerCAmelCase__ : Any = F"""mid_block.resnets.{i}."""
    lowerCAmelCase__ : Any = F"""mid.block_{i+1}."""
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
# Attention-specific renames applied only to attention keys.
lowerCAmelCase__ : str = [
    # (stable-diffusion, HF Diffusers)
    ('norm.', 'group_norm.'),
    ('q.', 'query.'),
    ('k.', 'key.'),
    ('v.', 'value.'),
    ('proj_out.', 'proj_attn.'),
]
def a_ ( w ):
    """Convert an HF linear weight to SD conv2d form.

    Appends two trailing singleton dimensions, e.g. (out, in) -> (out, in, 1, 1).

    Args:
        w: a tensor-like object exposing ``.shape`` and ``.reshape``.

    Returns:
        The reshaped tensor.
    """
    # Fix: the original parameter was mangled while the body read `w`, so
    # every call raised NameError; the parameter is now named `w`.
    # convert HF linear weights to SD conv2d weights
    return w.reshape(*w.shape , 1 , 1 )
def a_ ( lowerCamelCase ):
    """Map HF Diffusers VAE state-dict keys to stable-diffusion names and
    reshape the mid-block attention q/k/v/proj weights to conv form.

    NOTE(review): obfuscated — the ``v.replace(...)`` results are bound to a
    dummy name instead of rebinding ``v``/``mapping[k]``, and the body reads
    ``vae_state_dict``/``new_state_dict``/``weights_to_convert`` which the
    dummy assignments never bind; confirm against the original script.
    """
    UpperCAmelCase__ = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            UpperCAmelCase__ = v.replace(lowerCamelCase , lowerCamelCase )
        UpperCAmelCase__ = v
    for k, v in mapping.items():
        # Attention-specific renames apply only to attention keys.
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                UpperCAmelCase__ = v.replace(lowerCamelCase , lowerCamelCase )
            UpperCAmelCase__ = v
    UpperCAmelCase__ = {v: vae_state_dict[k] for k, v in mapping.items()}
    UpperCAmelCase__ = ['q', 'k', 'v', 'proj_out']
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f'''mid.attn_1.{weight_name}.weight''' in k:
                print(f'''Reshaping {k} for SD format''' )
                UpperCAmelCase__ = reshape_weight_for_sd(lowerCamelCase )
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
# Text-encoder key-renaming table: (stable-diffusion name, HF Diffusers name).
# NOTE(review): the dict/regex lines below read ``textenc_conversion_lst`` and
# ``protected``, which the obfuscated assignments never bind — confirm against
# the original script.
lowerCAmelCase__ : Optional[int] = [
    # (stable-diffusion, HF Diffusers)
    ('resblocks.', 'text_model.encoder.layers.'),
    ('ln_1', 'layer_norm1'),
    ('ln_2', 'layer_norm2'),
    ('.c_fc.', '.fc1.'),
    ('.c_proj.', '.fc2.'),
    ('.attn', '.self_attn'),
    ('ln_final.', 'transformer.text_model.final_layer_norm.'),
    ('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'),
    ('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight'),
]
# Reverse lookup (HF -> SD) plus one regex alternation matching any HF name.
lowerCAmelCase__ : List[Any] = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
lowerCAmelCase__ : int = re.compile('|'.join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
lowerCAmelCase__ : Optional[int] = {'q': 0, 'k': 1, 'v': 2}
def a_ ( lowerCamelCase ):
    """Convert a v2 (OpenCLIP) text-encoder state dict to SD naming.

    Separate q/k/v projection weights and biases are captured per layer and
    concatenated back into the fused `in_proj` tensors SD expects; all other
    keys are renamed via the module-level regex table.

    NOTE(review): machine-mangled — assignment targets became `UpperCAmelCase__`
    (originally e.g. `capture_qkv_weight[k_pre][code2idx[k_code]] = v` and
    `new_state_dict[...] = ...`), and each `lambda lowerCamelCase: ...m.group(0)...`
    references the undefined name `m` instead of its own parameter (NameError at
    runtime). Compare with the upstream conversion script before relying on this.
    """
    UpperCAmelCase__ = {}
    UpperCAmelCase__ = {}
    UpperCAmelCase__ = {}
    for k, v in text_enc_dict.items():
        # q/k/v projection WEIGHTS are stashed for later fusion, not copied directly
        if (
            k.endswith('.self_attn.q_proj.weight' )
            or k.endswith('.self_attn.k_proj.weight' )
            or k.endswith('.self_attn.v_proj.weight' )
        ):
            UpperCAmelCase__ = k[: -len('.q_proj.weight' )]
            # single char 'q'/'k'/'v' at a fixed negative offset selects the slot
            UpperCAmelCase__ = k[-len('q_proj.weight' )]
            if k_pre not in capture_qkv_weight:
                UpperCAmelCase__ = [None, None, None]
            UpperCAmelCase__ = v
            continue
        # same treatment for the BIASES
        if (
            k.endswith('.self_attn.q_proj.bias' )
            or k.endswith('.self_attn.k_proj.bias' )
            or k.endswith('.self_attn.v_proj.bias' )
        ):
            UpperCAmelCase__ = k[: -len('.q_proj.bias' )]
            UpperCAmelCase__ = k[-len('q_proj.bias' )]
            if k_pre not in capture_qkv_bias:
                UpperCAmelCase__ = [None, None, None]
            UpperCAmelCase__ = v
            continue
        UpperCAmelCase__ = textenc_pattern.sub(lambda lowerCamelCase : protected[re.escape(m.group(0 ) )] , lowerCamelCase )
        UpperCAmelCase__ = v
    # fuse captured q/k/v weights; a missing slot means a corrupted checkpoint
    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing' )
        UpperCAmelCase__ = textenc_pattern.sub(lambda lowerCamelCase : protected[re.escape(m.group(0 ) )] , lowerCamelCase )
        UpperCAmelCase__ = torch.cat(lowerCamelCase )
    # fuse captured q/k/v biases the same way
    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing' )
        UpperCAmelCase__ = textenc_pattern.sub(lambda lowerCamelCase : protected[re.escape(m.group(0 ) )] , lowerCamelCase )
        UpperCAmelCase__ = torch.cat(lowerCamelCase )
    return new_state_dict
def a_ ( lowerCamelCase ):
    """Identity conversion for the v1 (CLIP) text encoder state dict.

    v1 checkpoints need no key renames, so the dict is returned unchanged.

    Args:
        lowerCamelCase: text-encoder state dict.

    Returns:
        The same dict object, untouched.
    """
    # BUG FIX: the body returned the undefined name `text_enc_dict`; return the parameter.
    return lowerCamelCase
# Entry point: load a Diffusers model directory (safetensors preferred, .bin fallback),
# convert the UNet / VAE / text-encoder state dicts to original SD naming, and save a
# single checkpoint.
# NOTE(review): machine-mangled — every result is bound to `lowerCAmelCase__`, so the
# names read below (`parser`, `args`, `unet_path`, `unet_state_dict`, `state_dict`, ...)
# are never actually bound, and `osp` / `load_file` / `save_file` / the convert_* helpers
# have no visible definitions (their imports/defs were mangled too). TODO restore names.
if __name__ == "__main__":
    lowerCAmelCase__ : Tuple = argparse.ArgumentParser()
    parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
    parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
    parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
    parser.add_argument(
        '--use_safetensors', action='store_true', help='Save weights use safetensors, default is ckpt.'
    )
    lowerCAmelCase__ : Optional[int] = parser.parse_args()
    assert args.model_path is not None, "Must provide a model path!"
    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
    # Path for safetensors
    lowerCAmelCase__ : Tuple = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.safetensors')
    lowerCAmelCase__ : List[str] = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.safetensors')
    lowerCAmelCase__ : int = osp.join(args.model_path, 'text_encoder', 'model.safetensors')
    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        lowerCAmelCase__ : Union[str, Any] = load_file(unet_path, device='cpu')
    else:
        lowerCAmelCase__ : str = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.bin')
        lowerCAmelCase__ : Dict = torch.load(unet_path, map_location='cpu')
    if osp.exists(vae_path):
        lowerCAmelCase__ : Optional[Any] = load_file(vae_path, device='cpu')
    else:
        lowerCAmelCase__ : Optional[int] = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.bin')
        lowerCAmelCase__ : List[str] = torch.load(vae_path, map_location='cpu')
    if osp.exists(text_enc_path):
        lowerCAmelCase__ : Tuple = load_file(text_enc_path, device='cpu')
    else:
        lowerCAmelCase__ : Any = osp.join(args.model_path, 'text_encoder', 'pytorch_model.bin')
        lowerCAmelCase__ : Any = torch.load(text_enc_path, map_location='cpu')
    # Convert the UNet model
    lowerCAmelCase__ : Any = convert_unet_state_dict(unet_state_dict)
    lowerCAmelCase__ : Dict = {'model.diffusion_model.' + k: v for k, v in unet_state_dict.items()}
    # Convert the VAE model
    lowerCAmelCase__ : List[Any] = convert_vae_state_dict(vae_state_dict)
    lowerCAmelCase__ : str = {'first_stage_model.' + k: v for k, v in vae_state_dict.items()}
    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    lowerCAmelCase__ : List[Any] = 'text_model.encoder.layers.22.layer_norm2.bias' in text_enc_dict
    if is_vaa_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        lowerCAmelCase__ : Tuple = {'transformer.' + k: v for k, v in text_enc_dict.items()}
        lowerCAmelCase__ : List[str] = convert_text_enc_state_dict_vaa(text_enc_dict)
        lowerCAmelCase__ : str = {'cond_stage_model.model.' + k: v for k, v in text_enc_dict.items()}
    else:
        lowerCAmelCase__ : Optional[Any] = convert_text_enc_state_dict(text_enc_dict)
        lowerCAmelCase__ : Optional[Any] = {'cond_stage_model.transformer.' + k: v for k, v in text_enc_dict.items()}
    # Put together new checkpoint
    lowerCAmelCase__ : List[Any] = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        lowerCAmelCase__ : int = {k: v.half() for k, v in state_dict.items()}
    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        lowerCAmelCase__ : List[str] = {'state_dict': state_dict}
        torch.save(state_dict, args.checkpoint_path)
# | 98 | 1 |  -- dataset-join artifact between concatenated files; not Python code
"""simple docstring"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class snake_case :
    """Resize images so their shorter edge lands in a sampled range, capped by a max size.

    NOTE(review): machine-mangled — ``__init__`` declares BOTH parameters as
    ``lowerCamelCase__`` (duplicate parameter names are a SyntaxError), and local
    bindings collapsed to ``UpperCAmelCase__`` so the names read later (``h``, ``w``,
    ``scale``, ``newh``, ``neww``, ``size``, ``imgs``, ``img_augs``, ``pil_image``)
    are undefined. Upstream this is LXMERT's ``ResizeShortestEdge``.
    """
    def __init__( self : Optional[Any] ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : int=sys.maxsize ):
        # interpolation mode, longest-edge cap, and [min, max] shorter-edge range
        UpperCAmelCase__ = 'bilinear'
        UpperCAmelCase__ = max_size
        UpperCAmelCase__ = short_edge_length
    def __call__( self : List[Any] ,lowerCamelCase__ : str ):
        # resize each image independently; uint8 arrays go through PIL, everything
        # else through torch's interpolate
        UpperCAmelCase__ = []
        for img in imgs:
            UpperCAmelCase__ , UpperCAmelCase__ = img.shape[:2]
            # later: provide list and randomly choose index for resize
            UpperCAmelCase__ = np.random.randint(self.short_edge_length[0] ,self.short_edge_length[1] + 1 )
            if size == 0:
                return img
            UpperCAmelCase__ = size * 1.0 / min(lowerCamelCase__ ,lowerCamelCase__ )
            if h < w:
                UpperCAmelCase__ , UpperCAmelCase__ = size, scale * w
            else:
                UpperCAmelCase__ , UpperCAmelCase__ = scale * h, size
            # shrink further if the longer edge would exceed the cap
            if max(lowerCamelCase__ ,lowerCamelCase__ ) > self.max_size:
                UpperCAmelCase__ = self.max_size * 1.0 / max(lowerCamelCase__ ,lowerCamelCase__ )
                UpperCAmelCase__ = newh * scale
                UpperCAmelCase__ = neww * scale
            UpperCAmelCase__ = int(neww + 0.5 )
            UpperCAmelCase__ = int(newh + 0.5 )
            if img.dtype == np.uinta:
                UpperCAmelCase__ = Image.fromarray(lowerCamelCase__ )
                UpperCAmelCase__ = pil_image.resize((neww, newh) ,PILImageResampling.BILINEAR )
                UpperCAmelCase__ = np.asarray(lowerCamelCase__ )
            else:
                UpperCAmelCase__ = img.permute(2 ,0 ,1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw
                UpperCAmelCase__ = nn.functional.interpolate(
                    lowerCamelCase__ ,(newh, neww) ,mode=self.interp_method ,align_corners=lowerCamelCase__ ).squeeze(0 )
            img_augs.append(lowerCamelCase__ )
        return img_augs
class snake_case :
    """Full inference preprocessing pipeline: resize, normalize, pad, and batch images.

    NOTE(review): machine-mangled — assignment targets collapsed to
    ``UpperCAmelCase__``, so attributes such as ``self.aug`` / ``self.pad_value`` /
    ``self.normalizer`` are never actually bound, ``__call__`` declares duplicate
    ``lowerCamelCase__`` parameters (SyntaxError), and the normalizer lambda reads the
    undefined name ``x``. Upstream this is LXMERT's ``Preprocess`` utility.
    """
    def __init__( self : List[str] ,lowerCamelCase__ : Tuple ):
        # pull resize bounds, padding, device, and pixel statistics from the config
        UpperCAmelCase__ = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] ,cfg.INPUT.MAX_SIZE_TEST )
        UpperCAmelCase__ = cfg.INPUT.FORMAT
        UpperCAmelCase__ = cfg.SIZE_DIVISIBILITY
        UpperCAmelCase__ = cfg.PAD_VALUE
        UpperCAmelCase__ = cfg.INPUT.MAX_SIZE_TEST
        UpperCAmelCase__ = cfg.MODEL.DEVICE
        UpperCAmelCase__ = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) ,1 ,1 )
        UpperCAmelCase__ = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) ,1 ,1 )
        UpperCAmelCase__ = lambda lowerCamelCase__ : (x - self.pixel_mean) / self.pixel_std
    def __lowerCAmelCase ( self : List[str] ,lowerCamelCase__ : Any ):
        # pad every image (bottom/right) up to the per-batch max H/W, then stack;
        # also returns the original (unpadded) sizes
        UpperCAmelCase__ = tuple(max(lowerCamelCase__ ) for s in zip(*[img.shape for img in images] ) )
        UpperCAmelCase__ = [im.shape[-2:] for im in images]
        UpperCAmelCase__ = [
            nn.functional.pad(
                lowerCamelCase__ ,[0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] ,value=self.pad_value ,)
            for size, im in zip(lowerCamelCase__ ,lowerCamelCase__ )
        ]
        return torch.stack(lowerCamelCase__ ), torch.tensor(lowerCamelCase__ )
    def __call__( self : Dict ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : str=False ):
        with torch.no_grad():
            if not isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
                UpperCAmelCase__ = [images]
            if single_image:
                assert len(lowerCamelCase__ ) == 1
            # coerce every entry to a float tensor on the target device
            for i in range(len(lowerCamelCase__ ) ):
                if isinstance(images[i] ,torch.Tensor ):
                    images.insert(lowerCamelCase__ ,images.pop(lowerCamelCase__ ).to(self.device ).float() )
                elif not isinstance(images[i] ,torch.Tensor ):
                    images.insert(
                        lowerCamelCase__ ,torch.as_tensor(img_tensorize(images.pop(lowerCamelCase__ ) ,input_format=self.input_format ) )
                        .to(self.device )
                        .float() ,)
            # resize smallest edge
            UpperCAmelCase__ = torch.tensor([im.shape[:2] for im in images] )
            UpperCAmelCase__ = self.aug(lowerCamelCase__ )
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            UpperCAmelCase__ = [self.normalizer(lowerCamelCase__ ) for x in images]
            # now pad them to do the following operations
            UpperCAmelCase__ , UpperCAmelCase__ = self.pad(lowerCamelCase__ )
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            # scale factors mapping resized coords back to original coords
            UpperCAmelCase__ = torch.true_divide(lowerCamelCase__ ,lowerCamelCase__ )
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def a_ ( boxes , scale_yx ):
    """Rescale box coordinates in place by per-axis (y, x) scale factors.

    Args:
        boxes: (N, 4) tensor of boxes laid out as (x0, y0, x1, y1); modified in place.
        scale_yx: scale factors whose column 1 multiplies the x coordinates and
            column 0 the y coordinates. NOTE(review): the multiply broadcasts along
            the last axis — confirm callers pass a (1, 2) per-image scale.

    Returns:
        The same `boxes` tensor, scaled.
    """
    # BUG FIX: the original signature declared the same name for both parameters
    # (a SyntaxError in Python) and the body referenced names that were therefore
    # undefined; give the parameters their real names.
    boxes[:, 0::2] *= scale_yx[:, 1]  # x0, x1
    boxes[:, 1::2] *= scale_yx[:, 0]  # y0, y1
    return boxes
def a_ ( tensor , box_size ):
    """Clamp box coordinates in place so they lie inside an image.

    Args:
        tensor: (N, 4) boxes as (x0, y0, x1, y1); modified in place.
        box_size: (height, width) of the image.

    Raises:
        AssertionError: if the tensor contains NaN or infinite values.
    """
    # BUG FIX: the original declared duplicate parameter names (a SyntaxError),
    # discarded the (h, w) unpack, and clamped all four columns with an undefined
    # name. Restored the standard behavior: x coords clamp to the width, y coords
    # to the height.
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
# | 98 | """simple docstring"""  -- dataset-join artifact between concatenated files; not Python code
def a_ ( a , b ):
    """Bitwise AND of two non-negative ints, rendered as a '0b'-prefixed binary string.

    Both operands are rendered at equal width (the longer binary length wins),
    e.g. a_(3, 5) == '0b001'.

    Args:
        a: first non-negative integer.
        b: second non-negative integer.

    Returns:
        '0b' followed by the per-bit AND of the zero-padded binary forms.

    Raises:
        ValueError: if either input is negative.
    """
    # BUG FIX: the original signature used the same name for both parameters,
    # which is a SyntaxError in Python; give them distinct names.
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive' )
    a_binary = bin(a)[2:]  # bin() already returns a str; just strip the "0b" prefix
    b_binary = bin(b)[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == '1' and char_b == '1' ) )
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len)) )
if __name__ == "__main__":
    # Running the module directly executes its doctests.
    import doctest
    doctest.testmod()
# | 98 | 1 |  -- dataset-join artifact between concatenated files; not Python code
"""simple docstring"""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
lowerCAmelCase__ : List[Any] = '\\n\n'
lowerCAmelCase__ : Tuple = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
lowerCAmelCase__ : str = '\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... 
input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n'
# NOTE(review): `_DESCRIPTION` / `_CITATION` / `_KWARGS_DESCRIPTION` read below have no
# visible definitions — the module-level constants were mangled to `lowerCAmelCase__`.
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case ( datasets.Metric ):
    """Perplexity metric: exponentiated mean negative log-likelihood under a causal LM.

    NOTE(review): machine-mangled — the ``_compute`` method declares every parameter
    as ``lowerCamelCase__`` (duplicate parameter names are a SyntaxError) and local
    bindings collapsed to ``UpperCAmelCase__``, so names read later (``model``,
    ``tokenizer``, ``encodings``, ``ppls``, ...) are never actually bound. Also
    ``torch.expa`` / ``torch.intaa`` look like mangled ``torch.exp`` / ``torch.int64``.
    """
    def __lowerCAmelCase ( self : str ):
        # metric metadata: a single string feature per example
        return datasets.MetricInfo(
            description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
                {
                    'input_texts': datasets.Value('string' ),
                } ) ,reference_urls=['https://huggingface.co/docs/transformers/perplexity'] ,)
    def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : int = 16 ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : List[str]=None ):
        # pick the device: explicit request wins, otherwise CUDA when available
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                UpperCAmelCase__ = 'cuda'
        else:
            UpperCAmelCase__ = 'cuda' if torch.cuda.is_available() else 'cpu'
        UpperCAmelCase__ = AutoModelForCausalLM.from_pretrained(lowerCamelCase__ )
        UpperCAmelCase__ = model.to(lowerCamelCase__ )
        UpperCAmelCase__ = AutoTokenizer.from_pretrained(lowerCamelCase__ )
        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            UpperCAmelCase__ = list(tokenizer.special_tokens_map_extended.values() )
            # check that the model already has at least one special token defined
            assert (
                len(lowerCamelCase__ ) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({'pad_token': existing_special_tokens[0]} )
        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            UpperCAmelCase__ = model.config.max_length - 1
        else:
            UpperCAmelCase__ = model.config.max_length
        UpperCAmelCase__ = tokenizer(
            lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ,padding=lowerCamelCase__ ,truncation=lowerCamelCase__ ,max_length=lowerCamelCase__ ,return_tensors='pt' ,return_attention_mask=lowerCamelCase__ ,).to(lowerCamelCase__ )
        UpperCAmelCase__ = encodings['input_ids']
        UpperCAmelCase__ = encodings['attention_mask']
        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1 ) ,1 ) ), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1 ) ,2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
        UpperCAmelCase__ = []
        UpperCAmelCase__ = CrossEntropyLoss(reduction='none' )
        # batched next-token loss: prepend BOS when requested, shift logits/labels,
        # mask padding, and exponentiate the mean per-sequence NLL
        for start_index in logging.tqdm(range(0 ,len(lowerCamelCase__ ) ,lowerCamelCase__ ) ):
            UpperCAmelCase__ = min(start_index + batch_size ,len(lowerCamelCase__ ) )
            UpperCAmelCase__ = encoded_texts[start_index:end_index]
            UpperCAmelCase__ = attn_masks[start_index:end_index]
            if add_start_token:
                UpperCAmelCase__ = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(lowerCamelCase__ )
                UpperCAmelCase__ = torch.cat([bos_tokens_tensor, encoded_batch] ,dim=1 )
                UpperCAmelCase__ = torch.cat(
                    [torch.ones(bos_tokens_tensor.size() ,dtype=torch.intaa ).to(lowerCamelCase__ ), attn_mask] ,dim=1 )
            UpperCAmelCase__ = encoded_batch
            with torch.no_grad():
                UpperCAmelCase__ = model(lowerCamelCase__ ,attention_mask=lowerCamelCase__ ).logits
            UpperCAmelCase__ = out_logits[..., :-1, :].contiguous()
            UpperCAmelCase__ = labels[..., 1:].contiguous()
            UpperCAmelCase__ = attn_mask[..., 1:].contiguous()
            UpperCAmelCase__ = torch.expa(
                (loss_fct(shift_logits.transpose(1 ,2 ) ,lowerCamelCase__ ) * shift_attention_mask_batch).sum(1 )
                / shift_attention_mask_batch.sum(1 ) )
            ppls += perplexity_batch.tolist()
        return {"perplexities": ppls, "mean_perplexity": np.mean(lowerCamelCase__ )}
# | 98 | """simple docstring"""  -- dataset-join artifact between concatenated files; not Python code
import requests
from bsa import BeautifulSoup
def a_ ( base_url , params ):
    """Fetch a Google Scholar lookup page and return the citation-count link text.

    Args:
        base_url: the scholar_lookup endpoint URL.
        params: query parameters describing the publication.

    Returns:
        Text of the third anchor in the result's link row (the "Cited by N" link).

    Raises:
        requests.RequestException: on network failure; AttributeError / IndexError
        if the expected markup is missing from the page.
    """
    # BUG FIX: the original declared duplicate parameter names (a SyntaxError) and
    # discarded every intermediate binding, leaving `soup`/`div`/`anchors` undefined.
    # NOTE(review): the module imports BeautifulSoup from `bsa` — likely a mangled `bs4`.
    soup = BeautifulSoup(requests.get(base_url, params=params).content , 'html.parser' )
    div = soup.find('div' , attrs={'class': 'gs_ri'} )
    anchors = div.find('div' , attrs={'class': 'gs_fl'} ).find_all('a' )
    return anchors[2].get_text()
if __name__ == "__main__":
    # Example Scholar lookup for a fixed publication; prints its citation link text.
    # NOTE(review): the binding was mangled to `lowerCAmelCase__`, so the `params`
    # name passed below is undefined — TODO restore the original variable name.
    lowerCAmelCase__ : Optional[int] = {
        'title': (
            'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
            'capacitance, volumetric capacitance, and energy density'
        ),
        'journal': 'Chem. Mater.',
        'volume': 30,
        'pages': '3979-3990',
        'year': 2_018,
        'hl': 'en',
    }
    print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
# | 98 | 1 |  -- dataset-join artifact between concatenated files; not Python code
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class snake_case :
    """Harness that builds tiny MRA configs/inputs and runs per-head shape checks.

    NOTE(review): machine-mangled — ``__init__`` and the ``create_and_check_*``
    methods declare every parameter as ``lowerCamelCase__`` (duplicate parameter
    names are a SyntaxError), and assignment targets collapsed to
    ``UpperCAmelCase__``, so attributes such as ``self.batch_size`` are never
    actually bound and names like ``model`` / ``result`` / ``config_and_inputs``
    are undefined. Recover the real identifiers from the upstream MRA test module.
    """
    def __init__( self : str ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : Tuple=2 ,lowerCamelCase__ : Any=8 ,lowerCamelCase__ : Tuple=True ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : str=True ,lowerCamelCase__ : List[str]=True ,lowerCamelCase__ : Tuple=99 ,lowerCamelCase__ : List[Any]=16 ,lowerCamelCase__ : Union[str, Any]=5 ,lowerCamelCase__ : Tuple=2 ,lowerCamelCase__ : Optional[int]=36 ,lowerCamelCase__ : Optional[Any]="gelu" ,lowerCamelCase__ : Dict=0.0 ,lowerCamelCase__ : Any=0.0 ,lowerCamelCase__ : Optional[Any]=512 ,lowerCamelCase__ : Tuple=16 ,lowerCamelCase__ : int=2 ,lowerCamelCase__ : str=0.0_2 ,lowerCamelCase__ : str=3 ,lowerCamelCase__ : Optional[Any]=4 ,lowerCamelCase__ : Tuple=None ,):
        UpperCAmelCase__ = parent
        UpperCAmelCase__ = batch_size
        UpperCAmelCase__ = seq_length
        UpperCAmelCase__ = is_training
        UpperCAmelCase__ = use_input_mask
        UpperCAmelCase__ = use_token_type_ids
        UpperCAmelCase__ = use_labels
        UpperCAmelCase__ = vocab_size
        UpperCAmelCase__ = hidden_size
        UpperCAmelCase__ = num_hidden_layers
        UpperCAmelCase__ = num_attention_heads
        UpperCAmelCase__ = intermediate_size
        UpperCAmelCase__ = hidden_act
        UpperCAmelCase__ = hidden_dropout_prob
        UpperCAmelCase__ = attention_probs_dropout_prob
        UpperCAmelCase__ = max_position_embeddings
        UpperCAmelCase__ = type_vocab_size
        UpperCAmelCase__ = type_sequence_label_size
        UpperCAmelCase__ = initializer_range
        UpperCAmelCase__ = num_labels
        UpperCAmelCase__ = num_choices
        UpperCAmelCase__ = scope
    def __lowerCAmelCase ( self : int ):
        # random ids / masks / labels sized by the tester's dimensions
        UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        UpperCAmelCase__ = None
        if self.use_input_mask:
            UpperCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
        UpperCAmelCase__ = None
        if self.use_token_type_ids:
            UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
        UpperCAmelCase__ = None
        UpperCAmelCase__ = None
        UpperCAmelCase__ = None
        if self.use_labels:
            UpperCAmelCase__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
            UpperCAmelCase__ = ids_tensor([self.batch_size] ,self.num_choices )
        UpperCAmelCase__ = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def __lowerCAmelCase ( self : int ):
        return MraConfig(
            vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=lowerCamelCase__ ,initializer_range=self.initializer_range ,)
    def __lowerCAmelCase ( self : Union[str, Any] ):
        UpperCAmelCase__ = self.get_config()
        UpperCAmelCase__ = 300
        return config
    def __lowerCAmelCase ( self : Optional[int] ):
        # decoder variant: adds encoder hidden states and attention mask
        (
            (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) ,
        ) = self.prepare_config_and_inputs()
        UpperCAmelCase__ = True
        UpperCAmelCase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def __lowerCAmelCase ( self : List[Any] ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : str ,lowerCamelCase__ : Dict ,lowerCamelCase__ : Dict ,lowerCamelCase__ : List[str] ):
        UpperCAmelCase__ = MraModel(config=lowerCamelCase__ )
        model.to(lowerCamelCase__ )
        model.eval()
        UpperCAmelCase__ = model(lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,token_type_ids=lowerCamelCase__ )
        UpperCAmelCase__ = model(lowerCamelCase__ ,token_type_ids=lowerCamelCase__ )
        UpperCAmelCase__ = model(lowerCamelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
    def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Any ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : Any ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : Any ,lowerCamelCase__ : Optional[Any] ,):
        UpperCAmelCase__ = True
        UpperCAmelCase__ = MraModel(lowerCamelCase__ )
        model.to(lowerCamelCase__ )
        model.eval()
        UpperCAmelCase__ = model(
            lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,token_type_ids=lowerCamelCase__ ,encoder_hidden_states=lowerCamelCase__ ,encoder_attention_mask=lowerCamelCase__ ,)
        UpperCAmelCase__ = model(
            lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,token_type_ids=lowerCamelCase__ ,encoder_hidden_states=lowerCamelCase__ ,)
        UpperCAmelCase__ = model(lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,token_type_ids=lowerCamelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
    def __lowerCAmelCase ( self : Optional[int] ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : str ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : str ,lowerCamelCase__ : Any ):
        UpperCAmelCase__ = MraForMaskedLM(config=lowerCamelCase__ )
        model.to(lowerCamelCase__ )
        model.eval()
        UpperCAmelCase__ = model(lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,token_type_ids=lowerCamelCase__ ,labels=lowerCamelCase__ )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
    def __lowerCAmelCase ( self : int ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : Any ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : Any ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : int ):
        UpperCAmelCase__ = MraForQuestionAnswering(config=lowerCamelCase__ )
        model.to(lowerCamelCase__ )
        model.eval()
        UpperCAmelCase__ = model(
            lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,token_type_ids=lowerCamelCase__ ,start_positions=lowerCamelCase__ ,end_positions=lowerCamelCase__ ,)
        self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
    def __lowerCAmelCase ( self : List[str] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : Any ,lowerCamelCase__ : Optional[Any] ):
        UpperCAmelCase__ = self.num_labels
        UpperCAmelCase__ = MraForSequenceClassification(lowerCamelCase__ )
        model.to(lowerCamelCase__ )
        model.eval()
        UpperCAmelCase__ = model(lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,token_type_ids=lowerCamelCase__ ,labels=lowerCamelCase__ )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
    def __lowerCAmelCase ( self : Union[str, Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : int ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : int ,lowerCamelCase__ : int ):
        UpperCAmelCase__ = self.num_labels
        UpperCAmelCase__ = MraForTokenClassification(config=lowerCamelCase__ )
        model.to(lowerCamelCase__ )
        model.eval()
        UpperCAmelCase__ = model(lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,token_type_ids=lowerCamelCase__ ,labels=lowerCamelCase__ )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
    def __lowerCAmelCase ( self : int ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : str ,lowerCamelCase__ : List[str] ):
        # multiple choice: inputs are expanded along a num_choices axis
        UpperCAmelCase__ = self.num_choices
        UpperCAmelCase__ = MraForMultipleChoice(config=lowerCamelCase__ )
        model.to(lowerCamelCase__ )
        model.eval()
        UpperCAmelCase__ = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
        UpperCAmelCase__ = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
        UpperCAmelCase__ = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
        UpperCAmelCase__ = model(
            lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,token_type_ids=lowerCamelCase__ ,labels=lowerCamelCase__ ,)
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
    def __lowerCAmelCase ( self : List[Any] ):
        UpperCAmelCase__ = self.prepare_config_and_inputs()
        (
            (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) ,
        ) = config_and_inputs
        UpperCAmelCase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class snake_case ( __UpperCAmelCase , unittest.TestCase ):
    """Common-suite tests for every MRA head, driven by the tester harness above.

    NOTE(review): machine-mangled — the base class ``__UpperCAmelCase`` (originally
    the common ModelTesterMixin) is undefined, every class attribute was renamed to
    ``snake_case__`` (each rebinding shadows the previous one, losing flags like
    ``test_pruning``/``test_headmasking``), and locals collapsed to
    ``UpperCAmelCase__`` so ``self.model_tester`` / ``self.config_tester`` are never
    actually bound.
    """
    snake_case__ = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    snake_case__ = False
    snake_case__ = False
    snake_case__ = False
    snake_case__ = False
    snake_case__ = ()
    def __lowerCAmelCase ( self : Dict ):
        UpperCAmelCase__ = MraModelTester(self )
        UpperCAmelCase__ = ConfigTester(self ,config_class=lowerCamelCase__ ,hidden_size=37 )
    def __lowerCAmelCase ( self : Any ):
        self.config_tester.run_common_tests()
    def __lowerCAmelCase ( self : int ):
        UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCamelCase__ )
    def __lowerCAmelCase ( self : int ):
        # exercise each position-embedding variant
        UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            UpperCAmelCase__ = type
            self.model_tester.create_and_check_model(*lowerCamelCase__ )
    def __lowerCAmelCase ( self : List[str] ):
        UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase__ )
    def __lowerCAmelCase ( self : Tuple ):
        UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*lowerCamelCase__ )
    def __lowerCAmelCase ( self : List[Any] ):
        UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*lowerCamelCase__ )
    def __lowerCAmelCase ( self : List[Any] ):
        UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase__ )
    def __lowerCAmelCase ( self : int ):
        UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*lowerCamelCase__ )
    @slow
    def __lowerCAmelCase ( self : Any ):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase__ = MraModel.from_pretrained(lowerCamelCase__ )
            self.assertIsNotNone(lowerCamelCase__ )
    @unittest.skip(reason='MRA does not output attentions' )
    def __lowerCAmelCase ( self : List[str] ):
        return
@require_torch
class snake_case ( unittest.TestCase ):
    """Slow integration tests against published MRA checkpoints.

    Each test runs a fixed input through a pretrained model and compares the
    output shape plus a 3x3 corner slice against hard-coded expected values.
    NOTE(review): machine-mangled — locals collapsed to ``UpperCAmelCase__``, so
    ``model`` / ``output`` / ``vocab_size`` read below are never actually bound.
    """
    @slow
    def __lowerCAmelCase ( self : Optional[int] ):
        # base model, 512-token variant
        UpperCAmelCase__ = MraModel.from_pretrained('uw-madison/mra-base-512-4' )
        UpperCAmelCase__ = torch.arange(256 ).unsqueeze(0 )
        with torch.no_grad():
            UpperCAmelCase__ = model(lowerCamelCase__ )[0]
        UpperCAmelCase__ = torch.Size((1, 256, 768) )
        self.assertEqual(output.shape ,lowerCamelCase__ )
        UpperCAmelCase__ = torch.tensor(
            [[[-0.0_1_4_0, 0.0_8_3_0, -0.0_3_8_1], [0.1_5_4_6, 0.1_4_0_2, 0.0_2_2_0], [0.1_1_6_2, 0.0_8_5_1, 0.0_1_6_5]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] ,lowerCamelCase__ ,atol=1e-4 ) )
    @slow
    def __lowerCAmelCase ( self : Tuple ):
        # masked-LM head, 512-token variant
        UpperCAmelCase__ = MraForMaskedLM.from_pretrained('uw-madison/mra-base-512-4' )
        UpperCAmelCase__ = torch.arange(256 ).unsqueeze(0 )
        with torch.no_grad():
            UpperCAmelCase__ = model(lowerCamelCase__ )[0]
        UpperCAmelCase__ = 50_265
        UpperCAmelCase__ = torch.Size((1, 256, vocab_size) )
        self.assertEqual(output.shape ,lowerCamelCase__ )
        UpperCAmelCase__ = torch.tensor(
            [[[9.2_5_9_5, -3.6_0_3_8, 1_1.8_8_1_9], [9.3_8_6_9, -3.2_6_9_3, 1_1.0_9_5_6], [1_1.8_5_2_4, -3.4_9_3_8, 1_3.1_2_1_0]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] ,lowerCamelCase__ ,atol=1e-4 ) )
    @slow
    def __lowerCAmelCase ( self : Any ):
        # masked-LM head, long-sequence (4096) variant
        UpperCAmelCase__ = MraForMaskedLM.from_pretrained('uw-madison/mra-base-4096-8-d3' )
        UpperCAmelCase__ = torch.arange(4_096 ).unsqueeze(0 )
        with torch.no_grad():
            UpperCAmelCase__ = model(lowerCamelCase__ )[0]
        UpperCAmelCase__ = 50_265
        UpperCAmelCase__ = torch.Size((1, 4_096, vocab_size) )
        self.assertEqual(output.shape ,lowerCamelCase__ )
        UpperCAmelCase__ = torch.tensor(
            [[[5.4_7_8_9, -2.3_5_6_4, 7.5_0_6_4], [7.9_0_6_7, -1.3_3_6_9, 9.9_6_6_8], [9.0_7_1_2, -1.8_1_0_6, 7.0_3_8_0]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] ,lowerCamelCase__ ,atol=1e-4 ) )
# | 98 | """simple docstring"""  -- dataset-join artifact between concatenated files; not Python code
def a_ ( lowerCamelCase ):
    """Return True when the decimal representation of the value is a palindrome."""
    digits = str(lowerCamelCase)
    return digits == digits[::-1]
def a_ ( lowerCamelCase ):
return int(lowerCamelCase ) + int(str(lowerCamelCase )[::-1] )
def a_ ( lowerCamelCase = 1_0_0_0_0 ):
UpperCAmelCase__ = []
for num in range(1 , lowerCamelCase ):
UpperCAmelCase__ = 0
UpperCAmelCase__ = num
while iterations < 5_0:
UpperCAmelCase__ = sum_reverse(lowerCamelCase )
iterations += 1
if is_palindrome(lowerCamelCase ):
break
else:
lychrel_nums.append(lowerCamelCase )
return len(lowerCamelCase )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 98 | 1 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class snake_case ( DiffusionPipeline ):
    """Unconditional image-generation pipeline using DDIM sampling.

    Fixes over the previous revision: `__init__` and `__call__` declared several
    parameters with the same obfuscated name (a SyntaxError), and the base class
    name was undefined — the imports show it is `DiffusionPipeline`.
    """

    def __init__(self, unet, scheduler):
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        """Run the denoising loop and return generated images.

        Returns an `ImagePipelineOutput` (or a plain tuple when
        `return_dict=False`).
        """
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f'''You have passed a list of generators of length {len(generator)}, but requested an effective batch'''
                f''' size of {batch_size}. Make sure the batch size matches the length of the generators.'''
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in the DDIM paper and should be in [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        # Map from [-1, 1] to [0, 1] and move channels last for numpy/PIL.
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 98 | """simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure for the (deprecated) M-CTC-T model. The previous
# revision assigned this dict to an obfuscated name while `_LazyModule` below
# read `_import_structure` (NameError), dropped the modeling entries on the
# floor instead of adding them to the dict, and discarded the `_LazyModule`
# instead of installing it in `sys.modules`.
_import_structure = {
    'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
    'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
    'processing_mctct': ['MCTCTProcessor'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_mctct'] = [
        'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MCTCTForCTC',
        'MCTCTModel',
        'MCTCTPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
# Module-level logger (the previous obfuscated name made it unreachable).
logger = logging.get_logger(__name__)
lowerCAmelCase__ : Any = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase__ : Union[str, Any] = [
'small',
'small-base',
'medium',
'medium-base',
'intermediate',
'intermediate-base',
'large',
'large-base',
'xlarge',
'xlarge-base',
]
lowerCAmelCase__ : int = {
'vocab_file': {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt',
'funnel-transformer/medium-base': (
'https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'
),
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt',
'funnel-transformer/xlarge-base': (
'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json',
'funnel-transformer/small-base': (
'https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'
),
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json',
'funnel-transformer/medium-base': (
'https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'
),
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json',
'funnel-transformer/large-base': (
'https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'
),
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json',
'funnel-transformer/xlarge-base': (
'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'
),
},
}
lowerCAmelCase__ : int = {F"""funnel-transformer/{name}""": 512 for name in _model_names}
lowerCAmelCase__ : Optional[Any] = {F"""funnel-transformer/{name}""": {'do_lower_case': True} for name in _model_names}
class snake_case ( PreTrainedTokenizerFast ):
    r"""Fast (tokenizers-backed) tokenizer for Funnel Transformer.

    Fixes over the previous revision: duplicate obfuscated parameter names in
    `__init__` (a SyntaxError), three methods sharing a single name (only the
    last survived), an undefined base class, and undefined constant names.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # Funnel uses token-type id 2 for the [CLS] token (see create_token_type_ids).
    cls_token_type_id: int = 2

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        bos_token="<s>",
        eos_token="</s>",
        clean_text=True,
        tokenize_chinese_chars=True,
        strip_accents=None,
        wordpieces_prefix="##",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            bos_token=bos_token,
            eos_token=eos_token,
            clean_text=clean_text,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            wordpieces_prefix=wordpieces_prefix,
            **kwargs,
        )
        # Re-sync the backend normalizer if the serialized options differ from
        # the arguments passed to this constructor.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Add [CLS]/[SEP] around one sequence or a pair of sequences."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build token-type ids: cls_token_type_id for [CLS], 0 for the first
        segment, 1 for the optional second segment."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 98 | """simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
    from PIL import Image
else:
    # Minimal stand-in so module-level references to `Image` still resolve when
    # the vision extras (PIL) are not installed. The previous revision named the
    # class `snake_case`, which left `Image` undefined in the fallback branch.
    class Image:
        """Stub replacing `PIL.Image` when vision dependencies are absent."""

        @staticmethod
        def open(*args, **kwargs):
            # NOTE(review): method restored as `open` to mirror `PIL.Image.open`;
            # it is intentionally a no-op.
            pass
def hashimage(image):
    """Return the first 10 hex chars of the MD5 of *image*'s raw bytes.

    Fixes over the previous revision: `hashlib.mda` is not a real function
    (should be `md5`) and the digest was read from an undefined name.
    """
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:1_0]


def mask_to_test_readable(mask):
    """Summarize a mask image as a short dict {hash, shape} for test fixtures."""
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}


a_ = mask_to_test_readable  # backward-compatible alias for the previous name
@is_pipeline_test
@require_vision
@require_torch
class snake_case ( unittest.TestCase ):
    """Pipeline tests for `mask-generation` (SAM).

    Fixes over the previous revision: duplicate obfuscated parameter names
    (SyntaxError), result lists assigned to one name but appended to another
    (`new_outupt` NameError), results never passed to `nested_simplify`, and
    all methods/class attributes sharing a single name.
    """

    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )

    def get_test_pipeline(self, model, tokenizer, processor):
        """Build a MaskGenerationPipeline plus two sample images for the runner."""
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, image_segmenter, examples):
        # No small deterministic checkpoint exists, so the generic runner is a no-op.
        pass

    @require_tf
    @unittest.skip('Image segmentation not implemented in TF' )
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_small_model_pt(self):
        image_segmenter = pipeline('mask-generation' ,model='facebook/sam-vit-huge' )
        outputs = image_segmenter('http://images.cocodataset.org/val2017/000000039769.jpg' ,points_per_batch=256 )
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs['masks'] ):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(new_output ,decimals=4 ) ,[
                {'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0444},
                {'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.021},
                {'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0167},
                {'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0132},
                {'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0053},
                {'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.9967},
                {'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.993},
                {'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.9909},
                {'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.9879},
                {'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.9834},
                {'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.9716},
                {'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.9612},
                {'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.9599},
                {'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.9552},
                {'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.9532},
                {'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.9516},
                {'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.9499},
                {'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.9483},
                {'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.9464},
                {'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.943},
                {'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.943},
                {'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.9408},
                {'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.9335},
                {'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.9326},
                {'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.9262},
                {'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.8999},
                {'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.8986},
                {'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.8984},
                {'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.8873},
                {'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.8871}
            ] ,)
        # fmt: on

    @require_torch
    @slow
    def test_threshold(self):
        model_id = 'facebook/sam-vit-huge'
        image_segmenter = pipeline('mask-generation' ,model=model_id )
        outputs = image_segmenter(
            'http://images.cocodataset.org/val2017/000000039769.jpg' ,pred_iou_thresh=1 ,points_per_batch=256 )
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs['masks'] ):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(new_output ,decimals=4 ) ,[
                {'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0444},
                {'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0210},
                {'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0167},
                {'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0132},
                {'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0053},
            ] ,)
| 98 | 1 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)


class snake_case ( PretrainedConfig ):
    """Configuration class for UPerNet semantic-segmentation models.

    Fixes over the previous revision: every `__init__` parameter shared one
    obfuscated name (a SyntaxError), the base class name was undefined (the
    imports show `PretrainedConfig`), and `logger` was assigned under an
    obfuscated name but read as `logger`.
    """

    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
            backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
        elif isinstance(backbone_config, dict):
            # Allow passing a plain dict and rebuild the right config class from it.
            backbone_model_type = backbone_config.get('model_type' )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
| 98 | """simple docstring"""
import functools
def a_(worda, wordb):
    """Return the Levenshtein edit distance between *worda* and *wordb*,
    computed top-down with memoized recursion.

    Fixes over the previous revision: both parameters (and both inner-function
    parameters) shared one obfuscated name, which is a SyntaxError in Python.
    """
    len_worda = len(worda)
    len_wordb = len(wordb)

    @functools.cache
    def min_distance(indexa, indexb) -> int:
        # if first word index is overflow - delete all from the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if second word index is overflow - delete all from the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb])  # current letters not identical
        return min(
            1 + min_distance(indexa + 1, indexb),
            1 + min_distance(indexa, indexb + 1),
            diff + min_distance(indexa + 1, indexb + 1),
        )

    return min_distance(0, 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 98 | 1 |
"""simple docstring"""
import functools
def a_(worda, wordb):
    """Return the Levenshtein edit distance between *worda* and *wordb*,
    computed top-down with memoized recursion.

    Fixes over the previous revision: both parameters (and both inner-function
    parameters) shared one obfuscated name, which is a SyntaxError in Python.
    """
    len_worda = len(worda)
    len_wordb = len(wordb)

    @functools.cache
    def min_distance(indexa, indexb) -> int:
        # if first word index is overflow - delete all from the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if second word index is overflow - delete all from the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb])  # current letters not identical
        return min(
            1 + min_distance(indexa + 1, indexb),
            1 + min_distance(indexa, indexb + 1),
            diff + min_distance(indexa + 1, indexb + 1),
        )

    return min_distance(0, 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
from PIL import Image
def change_contrast(img, level):
    """Return a copy of *img* with its contrast adjusted by *level*.

    Uses the standard contrast-correction factor and maps every pixel value
    through `int(128 + factor * (c - 128))` via `Image.point`.

    Fixes over the previous revision: both parameters shared one obfuscated
    name (a SyntaxError), and the __main__ guard called the undefined names
    `change_contrast`/`cont_img`.
    """
    factor = (2_5_9 * (level + 2_5_5)) / (2_5_5 * (2_5_9 - level))

    def contrast(c: int) -> int:
        return int(1_2_8 + factor * (c - 1_2_8))

    return img.point(contrast)


a_ = change_contrast  # backward-compatible alias for the previous public name


if __name__ == "__main__":
    # Load image
    with Image.open('image_data/lena.jpg') as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save('image_data/lena_high_contrast.png', format='png')
| 98 | 1 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
# `_generate_tables` below calls `logger.error(...)`, so this must be named `logger`.
logger = datasets.utils.logging.get_logger(__name__)
lowerCAmelCase__ : Any = ['names', 'prefix']
lowerCAmelCase__ : List[Any] = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols']
lowerCAmelCase__ : List[str] = ['encoding_errors', 'on_bad_lines']
lowerCAmelCase__ : Optional[Any] = ['date_format']
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV: mirrors the `pandas.read_csv` keyword arguments.

    Fixes over the previous revision: every field was named `snake_case__`
    (with no annotation, so none of them were even dataclass fields), the
    methods below read `self.sep`, `self.delimiter`, ... which therefore did
    not exist, and the class itself must be named `CsvConfig` because
    `pd_read_csv_kwargs` instantiates `CsvConfig()` and the builder assigns
    `BUILDER_CONFIG_CLASS = CsvConfig`. Field names are taken from the
    attribute reads in `pd_read_csv_kwargs`/`__post_init__`.
    """

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[str] = None
    converters: Optional[Dict[Union[int, str], Callable[[Any], Any]]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 1_00_00
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        # `delimiter`/`column_names` are accepted as aliases of `sep`/`names`.
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        """Build the kwargs dict actually passed to `pandas.read_csv`."""
        pd_read_csv_kwargs = {
            'sep': self.sep,
            'header': self.header,
            'names': self.names,
            'index_col': self.index_col,
            'usecols': self.usecols,
            'prefix': self.prefix,
            'mangle_dupe_cols': self.mangle_dupe_cols,
            'engine': self.engine,
            'converters': self.converters,
            'true_values': self.true_values,
            'false_values': self.false_values,
            'skipinitialspace': self.skipinitialspace,
            'skiprows': self.skiprows,
            'nrows': self.nrows,
            'na_values': self.na_values,
            'keep_default_na': self.keep_default_na,
            'na_filter': self.na_filter,
            'verbose': self.verbose,
            'skip_blank_lines': self.skip_blank_lines,
            'thousands': self.thousands,
            'decimal': self.decimal,
            'lineterminator': self.lineterminator,
            'quotechar': self.quotechar,
            'quoting': self.quoting,
            'escapechar': self.escapechar,
            'comment': self.comment,
            'encoding': self.encoding,
            'dialect': self.dialect,
            'error_bad_lines': self.error_bad_lines,
            'warn_bad_lines': self.warn_bad_lines,
            'skipfooter': self.skipfooter,
            'doublequote': self.doublequote,
            'memory_map': self.memory_map,
            'float_precision': self.float_precision,
            'chunksize': self.chunksize,
            'encoding_errors': self.encoding_errors,
            'on_bad_lines': self.on_bad_lines,
            'date_format': self.date_format,
        }

        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        return pd_read_csv_kwargs
class snake_case ( datasets.ArrowBasedBuilder ):
    """CSV packaged-module builder: reads CSV files with pandas and yields Arrow tables.

    Fixes over the previous revision: all four methods shared one obfuscated
    name (only the last survived, and the builder API names `_info`,
    `_split_generators`, `_cast_table`, `_generate_tables` were undefined —
    `self._cast_table` is called inside `_generate_tables`), and several
    locals were assigned under obfuscated names but read under real ones.
    """

    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features )

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN ,gen_kwargs={'files': files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name ,gen_kwargs={'files': files} ) )
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values() ):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema] ,schema=schema )
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names ,schema.types ,self.config.features.values() )
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            csv_file_reader = pd.read_csv(file ,iterator=True ,dtype=dtype ,**self.config.pd_read_csv_kwargs )
            try:
                for batch_idx, df in enumerate(csv_file_reader ):
                    pa_table = pa.Table.from_pandas(df )
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table )
            except ValueError as e:
                logger.error(f'''Failed to read file \'{file}\' with error {type(e )}: {e}''' )
                raise
| 98 | """simple docstring"""
import os
import sys
import unittest
# Locate the repo root so that `utils/get_test_info.py` becomes importable.
# The previous revision assigned this under an obfuscated name while the next
# line read `git_repo_path`, and the two test-file constants collided.
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))

import get_test_info  # noqa: E402
from get_test_info import (  # noqa: E402
    get_model_to_test_mapping,
    get_model_to_tester_mapping,
    get_test_to_tester_mapping,
)


BERT_TEST_FILE = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py')
BLIP_TEST_FILE = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py')


class snake_case ( unittest.TestCase ):
    """Tests for the repo utility `utils/get_test_info.py`."""

    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)
        EXPECTED_BERT_MAPPING = {'BertModelTest': 'BertModelTester'}
        EXPECTED_BLIP_MAPPING = {
            'BlipModelTest': 'BlipModelTester',
            'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
            'BlipTextModelTest': 'BlipTextModelTester',
            'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
            'BlipVQAModelTest': 'BlipVQAModelTester',
            'BlipVisionModelTest': 'BlipVisionModelTester',
        }
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)
        EXPECTED_BERT_MAPPING = {
            'BertForMaskedLM': ['BertModelTest'],
            'BertForMultipleChoice': ['BertModelTest'],
            'BertForNextSentencePrediction': ['BertModelTest'],
            'BertForPreTraining': ['BertModelTest'],
            'BertForQuestionAnswering': ['BertModelTest'],
            'BertForSequenceClassification': ['BertModelTest'],
            'BertForTokenClassification': ['BertModelTest'],
            'BertLMHeadModel': ['BertModelTest'],
            'BertModel': ['BertModelTest'],
        }
        EXPECTED_BLIP_MAPPING = {
            'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
            'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
            'BlipForQuestionAnswering': ['BlipVQAModelTest'],
            'BlipModel': ['BlipModelTest'],
            'BlipTextModel': ['BlipTextModelTest'],
            'BlipVisionModel': ['BlipVisionModelTest'],
        }
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)
        EXPECTED_BERT_MAPPING = {
            'BertForMaskedLM': ['BertModelTester'],
            'BertForMultipleChoice': ['BertModelTester'],
            'BertForNextSentencePrediction': ['BertModelTester'],
            'BertForPreTraining': ['BertModelTester'],
            'BertForQuestionAnswering': ['BertModelTester'],
            'BertForSequenceClassification': ['BertModelTester'],
            'BertForTokenClassification': ['BertModelTester'],
            'BertLMHeadModel': ['BertModelTester'],
            'BertModel': ['BertModelTester'],
        }
        EXPECTED_BLIP_MAPPING = {
            'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
            'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
            'BlipForQuestionAnswering': ['BlipVQAModelTester'],
            'BlipModel': ['BlipModelTester'],
            'BlipTextModel': ['BlipTextModelTester'],
            'BlipVisionModel': ['BlipVisionModelTester'],
        }
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
| 98 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class snake_case ( ProcessorMixin ):
    """BridgeTower processor: wraps a BridgeTower image processor and a Roberta
    tokenizer into a single callable that produces one combined encoding.

    NOTE(review): the obfuscated original was invalid Python -- it inherited
    from the undefined name ``__UpperCAmelCase``, rebound a single class
    attribute three times, and declared several parameters with the same name
    (``lowerCamelCase__``), which is a SyntaxError. Names below are restored to
    the conventional ProcessorMixin contract.
    """

    # ProcessorMixin contract: the wrapped components and their classes.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Tokenize ``text`` and preprocess ``images``; returns one BatchEncoding
        holding the tokenizer outputs plus the image-processor outputs."""
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """Union of tokenizer and image-processor input names, order-preserving and de-duplicated."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 98 | """simple docstring"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Submodule -> public names, consumed by _LazyModule below. The obfuscated
# original bound this dict to a throwaway name, so the `_import_structure`
# reference at the bottom raised NameError; restored here.
_import_structure = {
    'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
    'tokenization_roc_bert': ['RoCBertTokenizer'],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # RoCBert ships no fast tokenizer; nothing extra to register.
    pass

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is available: expose the modeling classes lazily. The obfuscated
    # original assigned this list to a throwaway name and never registered it.
    _import_structure['modeling_roc_bert'] = [
        'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'RoCBertForCausalLM',
        'RoCBertForMaskedLM',
        'RoCBertForMultipleChoice',
        'RoCBertForPreTraining',
        'RoCBertForQuestionAnswering',
        'RoCBertForSequenceClassification',
        'RoCBertForTokenClassification',
        'RoCBertLayer',
        'RoCBertModel',
        'RoCBertPreTrainedModel',
        'load_tf_weights_in_roc_bert',
    ]

if TYPE_CHECKING:
    from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
    from .tokenization_roc_bert import RoCBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        # Fixed: the obfuscated version *raised* here when tokenizers were
        # available; there is nothing fast-tokenizer-specific to import.
        pass

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roc_bert import (
            ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoCBertForCausalLM,
            RoCBertForMaskedLM,
            RoCBertForMultipleChoice,
            RoCBertForPreTraining,
            RoCBertForQuestionAnswering,
            RoCBertForSequenceClassification,
            RoCBertForTokenClassification,
            RoCBertLayer,
            RoCBertModel,
            RoCBertPreTrainedModel,
            load_tf_weights_in_roc_bert,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class snake_case ( __UpperCAmelCase , unittest.TestCase ):
    """Tests for BlenderbotSmallTokenizer built from a tiny in-memory vocab and BPE merges.

    NOTE(review): an obfuscation pass renamed every method to ``__lowerCAmelCase``
    and every local to ``UpperCAmelCase__``. Because all methods share one name,
    each definition shadows the previous one -- only the final method survives on
    the class and unittest discovers none of the others; the base-class hooks
    (setUp / get_tokenizer / get_input_output_texts) are likewise lost. Locals
    assigned to ``UpperCAmelCase__`` are read back later under names that are
    never bound (e.g. ``self.vocab_file``, ``tokenizer``, ``tok``), which would
    raise at runtime. Restore the upstream names before relying on this file.
    """

    snake_case__ = BlenderbotSmallTokenizer
    # NOTE(review): rebinds the same class attribute as the line above.
    snake_case__ = False

    def __lowerCAmelCase ( self : List[str] ):
        # (review) Presumably the original setUp: materialize vocab.json / merges.txt in tmpdirname.
        super().setUp()
        UpperCAmelCase__ = ['__start__', 'adapt', 'act', 'ap@@', 'te', '__end__', '__unk__']
        UpperCAmelCase__ = dict(zip(lowerCamelCase__ ,range(len(lowerCamelCase__ ) ) ) )
        UpperCAmelCase__ = ['#version: 0.2', 'a p', 't e</w>', 'ap t</w>', 'a d', 'ad apt</w>', 'a c', 'ac t</w>', '']
        UpperCAmelCase__ = {'unk_token': '__unk__', 'bos_token': '__start__', 'eos_token': '__end__'}
        UpperCAmelCase__ = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
        UpperCAmelCase__ = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
            fp.write(json.dumps(lowerCamelCase__ ) + '\n' )
        with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
            fp.write('\n'.join(lowerCamelCase__ ) )

    def __lowerCAmelCase ( self : Optional[int] ,**lowerCamelCase__ : Any ):
        # (review) Presumably get_tokenizer: load a tokenizer from the files written in setUp.
        kwargs.update(self.special_tokens_map )
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname ,**lowerCamelCase__ )

    def __lowerCAmelCase ( self : Any ,lowerCamelCase__ : Optional[int] ):
        # (review) Presumably get_input_output_texts: identity pair for round-trip checks.
        UpperCAmelCase__ = 'adapt act apte'
        UpperCAmelCase__ = 'adapt act apte'
        return input_text, output_text

    def __lowerCAmelCase ( self : List[str] ):
        # (review) Presumably test_full_tokenizer: tokenize + convert_tokens_to_ids.
        UpperCAmelCase__ = BlenderbotSmallTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
        UpperCAmelCase__ = 'adapt act apte'
        UpperCAmelCase__ = ['adapt', 'act', 'ap@@', 'te']
        UpperCAmelCase__ = tokenizer.tokenize(lowerCamelCase__ )
        self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
        UpperCAmelCase__ = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        UpperCAmelCase__ = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) ,lowerCamelCase__ )

    def __lowerCAmelCase ( self : Union[str, Any] ):
        # (review) Decode round-trip on the public 90M checkpoint (network access required).
        UpperCAmelCase__ = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
        assert tok('sam' ).input_ids == [1_384]
        UpperCAmelCase__ = 'I am a small frog.'
        UpperCAmelCase__ = tok([src_text] ,padding=lowerCamelCase__ ,truncation=lowerCamelCase__ )['input_ids']
        UpperCAmelCase__ = tok.batch_decode(lowerCamelCase__ ,skip_special_tokens=lowerCamelCase__ ,clean_up_tokenization_spaces=lowerCamelCase__ )[0]
        assert src_text != decoded # I wish it did!
        assert decoded == "i am a small frog ."

    def __lowerCAmelCase ( self : int ):
        # (review) Checks '.' encodes to the same id standalone and at end of sentence.
        UpperCAmelCase__ = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
        UpperCAmelCase__ = 'I am a small frog .'
        UpperCAmelCase__ = '.'
        UpperCAmelCase__ = tok(lowerCamelCase__ )['input_ids']
        UpperCAmelCase__ = tok(lowerCamelCase__ )['input_ids']
        assert encoded[-1] == encoded_dot[0]
| 98 | """simple docstring"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


# Submodule -> public names, consumed by _LazyModule below. Restored to the
# canonical name `_import_structure`: the obfuscated original bound it to a
# throwaway name and never registered the torch/TF entries, so the final
# _LazyModule call raised NameError and lazy lookup could not work.
_import_structure = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_vit_mae'] = [
        'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ViTMAEForPreTraining',
        'ViTMAELayer',
        'ViTMAEModel',
        'ViTMAEPreTrainedModel',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_vit_mae'] = [
        'TFViTMAEForPreTraining',
        'TFViTMAEModel',
        'TFViTMAEPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_mae import (
            VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMAEForPreTraining,
            ViTMAELayer,
            ViTMAEModel,
            ViTMAEPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class snake_case ( __UpperCAmelCase , unittest.TestCase ):
    """Fast, dummy-weight tests for ``KandinskyInpaintPipeline``.

    NOTE(review): an obfuscation pass renamed every property/method to
    ``__lowerCAmelCase`` and every local to ``UpperCAmelCase__``. Python keeps
    only the last definition of a repeated name, so most members here shadow
    one another, and attribute reads such as ``self.dummy_text_encoder`` or
    ``self.cross_attention_dim`` refer to property names that are no longer
    defined. The comments below describe the apparent intent; confirm against
    the upstream diffusers test file before relying on this class.
    """

    snake_case__ = KandinskyInpaintPipeline
    snake_case__ = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    snake_case__ = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    snake_case__ = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    snake_case__ = False

    @property
    def __lowerCAmelCase ( self : Any ):
        # Small dimension used by the dummy models (original property name lost -- TODO confirm).
        return 32

    @property
    def __lowerCAmelCase ( self : Union[str, Any] ):
        return 32

    @property
    def __lowerCAmelCase ( self : Dict ):
        # NOTE(review): reads self.time_input_dim, which is never defined under that name here.
        return self.time_input_dim

    @property
    def __lowerCAmelCase ( self : Dict ):
        return self.time_input_dim * 4

    @property
    def __lowerCAmelCase ( self : Dict ):
        return 100

    @property
    def __lowerCAmelCase ( self : int ):
        # Tiny multilingual tokenizer checkpoint for fast tests.
        UpperCAmelCase__ = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
        return tokenizer

    @property
    def __lowerCAmelCase ( self : str ):
        # Deterministic tiny text encoder (seeded before weight init).
        torch.manual_seed(0 )
        UpperCAmelCase__ = MCLIPConfig(
            numDims=self.cross_attention_dim ,transformerDimensions=self.text_embedder_hidden_size ,hidden_size=self.text_embedder_hidden_size ,intermediate_size=37 ,num_attention_heads=4 ,num_hidden_layers=5 ,vocab_size=1_005 ,)
        UpperCAmelCase__ = MultilingualCLIP(lowerCamelCase__ )
        UpperCAmelCase__ = text_encoder.eval()
        return text_encoder

    @property
    def __lowerCAmelCase ( self : Optional[Any] ):
        # Tiny conditional UNet config for the inpainting pipeline.
        torch.manual_seed(0 )
        UpperCAmelCase__ = {
            'in_channels': 9,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 8,
            'addition_embed_type': 'text_image',
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
            'layers_per_block': 1,
            'encoder_hid_dim': self.text_embedder_hidden_size,
            'encoder_hid_dim_type': 'text_image_proj',
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': None,
        }
        UpperCAmelCase__ = UNetaDConditionModel(**lowerCamelCase__ )
        return model

    @property
    def __lowerCAmelCase ( self : str ):
        # Config kwargs for the tiny VQ decoder ("movq").
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def __lowerCAmelCase ( self : str ):
        torch.manual_seed(0 )
        UpperCAmelCase__ = VQModel(**self.dummy_movq_kwargs )
        return model

    def __lowerCAmelCase ( self : int ):
        # Assemble the dummy sub-models into the pipeline's components dict.
        UpperCAmelCase__ = self.dummy_text_encoder
        UpperCAmelCase__ = self.dummy_tokenizer
        UpperCAmelCase__ = self.dummy_unet
        UpperCAmelCase__ = self.dummy_movq
        UpperCAmelCase__ = DDIMScheduler(
            num_train_timesteps=1_000 ,beta_schedule='linear' ,beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,clip_sample=lowerCamelCase__ ,set_alpha_to_one=lowerCamelCase__ ,steps_offset=1 ,prediction_type='epsilon' ,thresholding=lowerCamelCase__ ,)
        UpperCAmelCase__ = {
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }
        return components

    def __lowerCAmelCase ( self : str ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : Dict=0 ):
        # Build deterministic dummy inputs: embeds, a 256x256 init image, and a mask.
        UpperCAmelCase__ = floats_tensor((1, self.cross_attention_dim) ,rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
        UpperCAmelCase__ = floats_tensor((1, self.cross_attention_dim) ,rng=random.Random(seed + 1 ) ).to(lowerCamelCase__ )
        # create init_image
        UpperCAmelCase__ = floats_tensor((1, 3, 64, 64) ,rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
        UpperCAmelCase__ = image.cpu().permute(0 ,2 ,3 ,1 )[0]
        UpperCAmelCase__ = Image.fromarray(np.uinta(lowerCamelCase__ ) ).convert('RGB' ).resize((256, 256) )
        # create mask
        UpperCAmelCase__ = np.ones((64, 64) ,dtype=np.floataa )
        UpperCAmelCase__ = 0
        if str(lowerCamelCase__ ).startswith('mps' ):
            # mps does not support device-bound generators.
            UpperCAmelCase__ = torch.manual_seed(lowerCamelCase__ )
        else:
            UpperCAmelCase__ = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
        UpperCAmelCase__ = {
            'prompt': 'horse',
            'image': init_image,
            'mask_image': mask,
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'generator': generator,
            'height': 64,
            'width': 64,
            'num_inference_steps': 2,
            'guidance_scale': 4.0,
            'output_type': 'np',
        }
        return inputs

    def __lowerCAmelCase ( self : Optional[int] ):
        # CPU end-to-end smoke test against a hard-coded expected pixel slice.
        UpperCAmelCase__ = 'cpu'
        UpperCAmelCase__ = self.get_dummy_components()
        UpperCAmelCase__ = self.pipeline_class(**lowerCamelCase__ )
        UpperCAmelCase__ = pipe.to(lowerCamelCase__ )
        pipe.set_progress_bar_config(disable=lowerCamelCase__ )
        UpperCAmelCase__ = pipe(**self.get_dummy_inputs(lowerCamelCase__ ) )
        UpperCAmelCase__ = output.images
        UpperCAmelCase__ = pipe(
            **self.get_dummy_inputs(lowerCamelCase__ ) ,return_dict=lowerCamelCase__ ,)[0]
        UpperCAmelCase__ = image[0, -3:, -3:, -1]
        UpperCAmelCase__ = image_from_tuple[0, -3:, -3:, -1]
        print(f'''image.shape {image.shape}''' )
        assert image.shape == (1, 64, 64, 3)
        UpperCAmelCase__ = np.array(
            [0.8_3_2_6_9_1_9, 0.7_3_7_9_0_4_6_7, 0.2_0_9_1_8_5_8_1, 0.9_3_0_9_6_1_2, 0.5_5_1_1_7_9_1, 0.4_3_7_1_3_3_2_8, 0.5_5_1_3_3_2_1, 0.4_9_9_2_2_9_3_4, 0.5_9_4_9_7_7_8_6] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''

    def __lowerCAmelCase ( self : Optional[Any] ):
        # Delegates to the mixin's batch-identity check with a relaxed tolerance.
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase ):
    """Slow GPU integration test: full Kandinsky prior + inpaint run vs. a reference image.

    NOTE(review): both methods below carry the same obfuscated name
    ``__lowerCAmelCase``, so the second definition shadows the first -- the
    VRAM-cleanup hook is lost and the test is not discoverable by unittest
    under these names.
    """

    def __lowerCAmelCase ( self : int ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __lowerCAmelCase ( self : Dict ):
        # Runs the prior to get image embeds, then inpaints a hat onto the cat image.
        UpperCAmelCase__ = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy' )
        UpperCAmelCase__ = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
        UpperCAmelCase__ = np.ones((768, 768) ,dtype=np.floataa )
        UpperCAmelCase__ = 0
        UpperCAmelCase__ = 'a hat'
        UpperCAmelCase__ = KandinskyPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1-prior' ,torch_dtype=torch.floataa )
        pipe_prior.to(lowerCamelCase__ )
        UpperCAmelCase__ = KandinskyInpaintPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1-inpaint' ,torch_dtype=torch.floataa )
        UpperCAmelCase__ = pipeline.to(lowerCamelCase__ )
        pipeline.set_progress_bar_config(disable=lowerCamelCase__ )
        UpperCAmelCase__ = torch.Generator(device='cpu' ).manual_seed(0 )
        UpperCAmelCase__ , UpperCAmelCase__ = pipe_prior(
            lowerCamelCase__ ,generator=lowerCamelCase__ ,num_inference_steps=5 ,negative_prompt='' ,).to_tuple()
        UpperCAmelCase__ = pipeline(
            lowerCamelCase__ ,image=lowerCamelCase__ ,mask_image=lowerCamelCase__ ,image_embeds=lowerCamelCase__ ,negative_image_embeds=lowerCamelCase__ ,generator=lowerCamelCase__ ,num_inference_steps=100 ,height=768 ,width=768 ,output_type='np' ,)
        UpperCAmelCase__ = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(lowerCamelCase__ ,lowerCamelCase__ )
| 98 | """simple docstring"""
import os
import numpy
import onnx
def a_ ( lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = a.name
UpperCAmelCase__ = b.name
UpperCAmelCase__ = ''
UpperCAmelCase__ = ''
UpperCAmelCase__ = a == b
UpperCAmelCase__ = name_a
UpperCAmelCase__ = name_b
return res
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(lowerCamelCase , lowerCamelCase )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , lowerCamelCase , lowerCamelCase )
_graph_replace_input_with(node_proto.attribute[1].g , lowerCamelCase , lowerCamelCase )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , lowerCamelCase , lowerCamelCase )
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
for n in graph_proto.node:
_node_replace_input_with(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = list(model.graph.initializer )
UpperCAmelCase__ = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
UpperCAmelCase__ = inits[i].name
UpperCAmelCase__ = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , lowerCamelCase , lowerCamelCase )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = os.path.dirname(lowerCamelCase )
UpperCAmelCase__ = os.path.basename(lowerCamelCase )
UpperCAmelCase__ = onnx.load(os.path.join(lowerCamelCase , lowerCamelCase ) )
UpperCAmelCase__ = list(model.graph.initializer )
UpperCAmelCase__ = set()
UpperCAmelCase__ = {}
UpperCAmelCase__ = []
UpperCAmelCase__ = 0
for i in range(len(lowerCamelCase ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(lowerCamelCase ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(lowerCamelCase )
dup_set.add(lowerCamelCase )
UpperCAmelCase__ = inits[j].data_type
UpperCAmelCase__ = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 1_1:
mem_size *= 8
else:
print('unexpected data type: ' , lowerCamelCase )
total_reduced_size += mem_size
UpperCAmelCase__ = inits[i].name
UpperCAmelCase__ = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(lowerCamelCase )
else:
UpperCAmelCase__ = [name_j]
ind_to_replace.append((j, i) )
print('total reduced size: ' , total_reduced_size / 1_0_2_4 / 1_0_2_4 / 1_0_2_4 , 'GB' )
UpperCAmelCase__ = sorted(lowerCamelCase )
_remove_dup_initializers_from_model(lowerCamelCase , lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = 'optimized_' + model_file_name
UpperCAmelCase__ = os.path.join(lowerCamelCase , lowerCamelCase )
onnx.save(lowerCamelCase , lowerCamelCase )
return new_model
| 98 | 1 |
"""simple docstring"""
import os
import numpy
import onnx
def a_ ( lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = a.name
UpperCAmelCase__ = b.name
UpperCAmelCase__ = ''
UpperCAmelCase__ = ''
UpperCAmelCase__ = a == b
UpperCAmelCase__ = name_a
UpperCAmelCase__ = name_b
return res
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(lowerCamelCase , lowerCamelCase )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , lowerCamelCase , lowerCamelCase )
_graph_replace_input_with(node_proto.attribute[1].g , lowerCamelCase , lowerCamelCase )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , lowerCamelCase , lowerCamelCase )
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
for n in graph_proto.node:
_node_replace_input_with(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = list(model.graph.initializer )
UpperCAmelCase__ = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
UpperCAmelCase__ = inits[i].name
UpperCAmelCase__ = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , lowerCamelCase , lowerCamelCase )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = os.path.dirname(lowerCamelCase )
UpperCAmelCase__ = os.path.basename(lowerCamelCase )
UpperCAmelCase__ = onnx.load(os.path.join(lowerCamelCase , lowerCamelCase ) )
UpperCAmelCase__ = list(model.graph.initializer )
UpperCAmelCase__ = set()
UpperCAmelCase__ = {}
UpperCAmelCase__ = []
UpperCAmelCase__ = 0
for i in range(len(lowerCamelCase ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(lowerCamelCase ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(lowerCamelCase )
dup_set.add(lowerCamelCase )
UpperCAmelCase__ = inits[j].data_type
UpperCAmelCase__ = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 1_1:
mem_size *= 8
else:
print('unexpected data type: ' , lowerCamelCase )
total_reduced_size += mem_size
UpperCAmelCase__ = inits[i].name
UpperCAmelCase__ = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(lowerCamelCase )
else:
UpperCAmelCase__ = [name_j]
ind_to_replace.append((j, i) )
print('total reduced size: ' , total_reduced_size / 1_0_2_4 / 1_0_2_4 / 1_0_2_4 , 'GB' )
UpperCAmelCase__ = sorted(lowerCamelCase )
_remove_dup_initializers_from_model(lowerCamelCase , lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = 'optimized_' + model_file_name
UpperCAmelCase__ = os.path.join(lowerCamelCase , lowerCamelCase )
onnx.save(lowerCamelCase , lowerCamelCase )
return new_model
| 98 | """simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class snake_case ( __UpperCAmelCase , unittest.TestCase ):
    """Fast, dummy-weight tests for ``ShapEImgaImgPipeline``.

    NOTE(review): the obfuscation pass renamed all properties/methods to
    ``__lowerCAmelCase``, so later definitions shadow earlier ones, and
    ``self.*`` reads such as ``self.time_input_dim`` / ``self.dummy_prior``
    reference names that are not defined under those names in this file.
    Confirm against the upstream diffusers test file before relying on it.
    """

    snake_case__ = ShapEImgaImgPipeline
    snake_case__ = ["image"]
    snake_case__ = ["image"]
    snake_case__ = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    snake_case__ = False

    @property
    def __lowerCAmelCase ( self : List[str] ):
        # Small dimension used by the dummy models (original property name lost -- TODO confirm).
        return 32

    @property
    def __lowerCAmelCase ( self : str ):
        return 32

    @property
    def __lowerCAmelCase ( self : int ):
        # NOTE(review): self.time_input_dim is not defined under that name here.
        return self.time_input_dim * 4

    @property
    def __lowerCAmelCase ( self : List[Any] ):
        return 8

    @property
    def __lowerCAmelCase ( self : Optional[int] ):
        # Deterministic tiny CLIP vision encoder (seeded before weight init).
        torch.manual_seed(0 )
        UpperCAmelCase__ = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size ,image_size=64 ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=37 ,num_attention_heads=4 ,num_channels=3 ,num_hidden_layers=5 ,patch_size=1 ,)
        UpperCAmelCase__ = CLIPVisionModel(lowerCamelCase__ )
        return model

    @property
    def __lowerCAmelCase ( self : Optional[Any] ):
        # CLIP-style preprocessing for the dummy image encoder.
        UpperCAmelCase__ = CLIPImageProcessor(
            crop_size=224 ,do_center_crop=lowerCamelCase__ ,do_normalize=lowerCamelCase__ ,do_resize=lowerCamelCase__ ,image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] ,image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] ,resample=3 ,size=224 ,)
        return image_processor

    @property
    def __lowerCAmelCase ( self : str ):
        # Tiny PriorTransformer.
        torch.manual_seed(0 )
        UpperCAmelCase__ = {
            'num_attention_heads': 2,
            'attention_head_dim': 16,
            'embedding_dim': self.time_input_dim,
            'num_embeddings': 32,
            'embedding_proj_dim': self.text_embedder_hidden_size,
            'time_embed_dim': self.time_embed_dim,
            'num_layers': 1,
            'clip_embed_dim': self.time_input_dim * 2,
            'additional_embeddings': 0,
            'time_embed_act_fn': 'gelu',
            'norm_in_type': 'layer',
            'embedding_proj_norm_type': 'layer',
            'encoder_hid_proj_type': None,
            'added_emb_type': None,
        }
        UpperCAmelCase__ = PriorTransformer(**lowerCamelCase__ )
        return model

    @property
    def __lowerCAmelCase ( self : Tuple ):
        # Tiny ShapERenderer.
        torch.manual_seed(0 )
        UpperCAmelCase__ = {
            'param_shapes': (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            'd_latent': self.time_input_dim,
            'd_hidden': self.renderer_dim,
            'n_output': 12,
            'background': (
                0.1,
                0.1,
                0.1,
            ),
        }
        UpperCAmelCase__ = ShapERenderer(**lowerCamelCase__ )
        return model

    def __lowerCAmelCase ( self : Any ):
        # Assemble the dummy sub-models into the pipeline's components dict.
        UpperCAmelCase__ = self.dummy_prior
        UpperCAmelCase__ = self.dummy_image_encoder
        UpperCAmelCase__ = self.dummy_image_processor
        UpperCAmelCase__ = self.dummy_renderer
        UpperCAmelCase__ = HeunDiscreteScheduler(
            beta_schedule='exp' ,num_train_timesteps=1_024 ,prediction_type='sample' ,use_karras_sigmas=lowerCamelCase__ ,clip_sample=lowerCamelCase__ ,clip_sample_range=1.0 ,)
        UpperCAmelCase__ = {
            'prior': prior,
            'image_encoder': image_encoder,
            'image_processor': image_processor,
            'renderer': renderer,
            'scheduler': scheduler,
        }
        return components

    def __lowerCAmelCase ( self : Optional[int] ,lowerCamelCase__ : Any ,lowerCamelCase__ : str=0 ):
        # Deterministic dummy inputs: a seeded 64x64 image tensor + generator.
        UpperCAmelCase__ = floats_tensor((1, 3, 64, 64) ,rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
        if str(lowerCamelCase__ ).startswith('mps' ):
            # mps does not support device-bound generators.
            UpperCAmelCase__ = torch.manual_seed(lowerCamelCase__ )
        else:
            UpperCAmelCase__ = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
        UpperCAmelCase__ = {
            'image': input_image,
            'generator': generator,
            'num_inference_steps': 1,
            'frame_size': 32,
            'output_type': 'np',
        }
        return inputs

    def __lowerCAmelCase ( self : Optional[int] ):
        # CPU smoke test against a hard-coded expected pixel slice.
        UpperCAmelCase__ = 'cpu'
        UpperCAmelCase__ = self.get_dummy_components()
        UpperCAmelCase__ = self.pipeline_class(**lowerCamelCase__ )
        UpperCAmelCase__ = pipe.to(lowerCamelCase__ )
        pipe.set_progress_bar_config(disable=lowerCamelCase__ )
        UpperCAmelCase__ = pipe(**self.get_dummy_inputs(lowerCamelCase__ ) )
        UpperCAmelCase__ = output.images[0]
        UpperCAmelCase__ = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        UpperCAmelCase__ = np.array(
            [
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
            ] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def __lowerCAmelCase ( self : Tuple ):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2] )

    def __lowerCAmelCase ( self : Tuple ):
        # Batch-identity check; relaxes max-difference comparison on CPU.
        UpperCAmelCase__ = torch_device == 'cpu'
        UpperCAmelCase__ = True
        self._test_inference_batch_single_identical(
            batch_size=2 ,test_max_difference=lowerCamelCase__ ,relax_max_difference=lowerCamelCase__ ,)

    def __lowerCAmelCase ( self : List[Any] ):
        # Checks num_images_per_prompt expands the output batch as expected.
        UpperCAmelCase__ = self.get_dummy_components()
        UpperCAmelCase__ = self.pipeline_class(**lowerCamelCase__ )
        UpperCAmelCase__ = pipe.to(lowerCamelCase__ )
        pipe.set_progress_bar_config(disable=lowerCamelCase__ )
        UpperCAmelCase__ = 1
        UpperCAmelCase__ = 2
        UpperCAmelCase__ = self.get_dummy_inputs(lowerCamelCase__ )
        for key in inputs.keys():
            if key in self.batch_params:
                UpperCAmelCase__ = batch_size * [inputs[key]]
        UpperCAmelCase__ = pipe(**lowerCamelCase__ ,num_images_per_prompt=lowerCamelCase__ )[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase ):
    """Slow GPU integration test for openai/shap-e-img2img against a reference rendering.

    NOTE(review): both methods carry the same obfuscated name
    ``__lowerCAmelCase``; the second shadows the first, so the VRAM-cleanup
    hook is lost and the test is not discoverable under these names.
    """

    def __lowerCAmelCase ( self : int ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __lowerCAmelCase ( self : Any ):
        # Full img2img run on the corgi fixture; compared by mean pixel difference.
        UpperCAmelCase__ = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png' )
        UpperCAmelCase__ = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/shap_e/test_shap_e_img2img_out.npy' )
        UpperCAmelCase__ = ShapEImgaImgPipeline.from_pretrained('openai/shap-e-img2img' )
        UpperCAmelCase__ = pipe.to(lowerCamelCase__ )
        pipe.set_progress_bar_config(disable=lowerCamelCase__ )
        UpperCAmelCase__ = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
        UpperCAmelCase__ = pipe(
            lowerCamelCase__ ,generator=lowerCamelCase__ ,guidance_scale=3.0 ,num_inference_steps=64 ,frame_size=64 ,output_type='np' ,).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(lowerCamelCase__ ,lowerCamelCase__ )
| 98 | 1 |
"""simple docstring"""
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
lowerCAmelCase__ : str = ['text', 'image', 'audio']
def create_inputs(input_types):
    """Build one dummy input per requested type.

    'text' yields a literal string, 'image' a 512x512 PIL image loaded from the
    COCO test fixture, 'audio' a 1-D torch tensor of ones; a nested list of
    types recursively yields a nested list of inputs. Raises ValueError for
    any other entry.

    NOTE: the obfuscated original named this function ``a_`` while its call
    sites used ``create_inputs`` (NameError), and its nested-list branch
    tested ``isinstance(x, x)`` (TypeError); both are fixed here.
    """
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append('Text input' )
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png' ).resize((5_1_2, 5_1_2) ) )
        elif input_type == "audio":
            inputs.append(torch.ones(3_0_0_0 ) )
        elif isinstance(input_type, list):
            # A nested list of types produces a nested list of inputs.
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f'''Invalid type requested: {input_type}''' )
    return inputs
def output_types(outputs):
    """Map each output object to its agent type name.

    Plain/agent text -> 'text', PIL/agent images -> 'image',
    torch tensors / agent audio -> 'audio'; anything else raises ValueError.

    NOTE: the obfuscated original named this function ``a_`` while its call
    site (the tool-test mixin) used ``output_types``, raising NameError; the
    name is restored here.
    """
    types_found = []
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            types_found.append('text' )
        elif isinstance(output, (Image.Image, AgentImage)):
            types_found.append('image' )
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            types_found.append('audio' )
        else:
            raise ValueError(f'''Invalid output: {output}''' )
    return types_found
@is_tool_test
class snake_case :
    """Generic checks for a Tool instance exposed on ``self.tool``.

    NOTE(review): all methods share the obfuscated name ``__lowerCAmelCase``,
    so only the last definition survives on the class and none of these checks
    run under test discovery. Bodies read locals (``inputs``, ``outputs``,
    ``_inputs``) whose obfuscated assignments to ``UpperCAmelCase__`` no longer
    bind those names, and ``isinstance(x, x)`` calls are garbled versions of
    ``isinstance(x, list)``. Restore the upstream names before relying on this.
    """

    def __lowerCAmelCase ( self : Any ):
        # Checks the tool's declared input/output types are all in the authorized set.
        self.assertTrue(hasattr(self.tool ,'inputs' ) )
        self.assertTrue(hasattr(self.tool ,'outputs' ) )
        UpperCAmelCase__ = self.tool.inputs
        for _input in inputs:
            if isinstance(_input ,lowerCamelCase__ ):
                for __input in _input:
                    self.assertTrue(__input in authorized_types )
            else:
                self.assertTrue(_input in authorized_types )
        UpperCAmelCase__ = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types )

    def __lowerCAmelCase ( self : Any ):
        # Calls the tool on dummy inputs and checks output types match its declaration.
        UpperCAmelCase__ = create_inputs(self.tool.inputs )
        UpperCAmelCase__ = self.tool(*lowerCamelCase__ )
        # There is a single output
        if len(self.tool.outputs ) == 1:
            UpperCAmelCase__ = [outputs]
        self.assertListEqual(output_types(lowerCamelCase__ ) ,self.tool.outputs )

    def __lowerCAmelCase ( self : List[str] ):
        # Checks description/default_checkpoint metadata conventions.
        self.assertTrue(hasattr(self.tool ,'description' ) )
        self.assertTrue(hasattr(self.tool ,'default_checkpoint' ) )
        self.assertTrue(self.tool.description.startswith('This is a tool that' ) )

    def __lowerCAmelCase ( self : Optional[int] ):
        # Checks outputs are instances of the expected agent types.
        UpperCAmelCase__ = create_inputs(self.tool.inputs )
        UpperCAmelCase__ = self.tool(*lowerCamelCase__ )
        if not isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
            UpperCAmelCase__ = [outputs]
        self.assertEqual(len(lowerCamelCase__ ) ,len(self.tool.outputs ) )
        for output, output_type in zip(lowerCamelCase__ ,self.tool.outputs ):
            UpperCAmelCase__ = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(lowerCamelCase__ ,lowerCamelCase__ ) )

    def __lowerCAmelCase ( self : Any ):
        # Checks the tool also accepts inputs pre-wrapped into agent types.
        UpperCAmelCase__ = create_inputs(self.tool.inputs )
        UpperCAmelCase__ = []
        for _input, input_type in zip(lowerCamelCase__ ,self.tool.inputs ):
            if isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
        # Should not raise an error
        UpperCAmelCase__ = self.tool(*lowerCamelCase__ )
        if not isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
            UpperCAmelCase__ = [outputs]
        self.assertEqual(len(lowerCamelCase__ ) ,len(self.tool.outputs ) )
| 98 | """simple docstring"""
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ : Optional[int] = logging.get_logger(__name__)
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = ["input_ids", "attention_mask"]
def __init__( self : Optional[int] ,lowerCamelCase__ : int="</s>" ,lowerCamelCase__ : str="<unk>" ,lowerCamelCase__ : Union[str, Any]="<pad>" ,lowerCamelCase__ : int=125 ,lowerCamelCase__ : str=None ,**lowerCamelCase__ : Union[str, Any] ,):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
UpperCAmelCase__ = [f'''<extra_id_{i}>''' for i in range(lowerCamelCase__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
UpperCAmelCase__ = len(set(filter(lambda lowerCamelCase__ : bool('extra_id' in str(lowerCamelCase__ ) ) ,lowerCamelCase__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
' provided to ByT5Tokenizer. In this case the additional_special_tokens must include the'
' extra_ids tokens' )
UpperCAmelCase__ = AddedToken(lowerCamelCase__ ,lstrip=lowerCamelCase__ ,rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) else pad_token
UpperCAmelCase__ = AddedToken(lowerCamelCase__ ,lstrip=lowerCamelCase__ ,rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) else eos_token
UpperCAmelCase__ = AddedToken(lowerCamelCase__ ,lstrip=lowerCamelCase__ ,rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) else unk_token
super().__init__(
eos_token=lowerCamelCase__ ,unk_token=lowerCamelCase__ ,pad_token=lowerCamelCase__ ,extra_ids=lowerCamelCase__ ,additional_special_tokens=lowerCamelCase__ ,**lowerCamelCase__ ,)
UpperCAmelCase__ = extra_ids
UpperCAmelCase__ = 2**8 # utf is 8 bits
# define special tokens dict
UpperCAmelCase__ = {
self.pad_token: 0,
self.eos_token: 1,
self.unk_token: 2,
}
UpperCAmelCase__ = len(self.special_tokens_encoder )
UpperCAmelCase__ = len(lowerCamelCase__ )
for i, token in enumerate(lowerCamelCase__ ):
UpperCAmelCase__ = self.vocab_size + i - n
UpperCAmelCase__ = {v: k for k, v in self.special_tokens_encoder.items()}
@property
def __lowerCAmelCase ( self : Union[str, Any] ):
return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ,lowerCamelCase__ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase__ ,token_ids_a=lowerCamelCase__ ,already_has_special_tokens=lowerCamelCase__ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(lowerCamelCase__ )) + [1]
return ([0] * len(lowerCamelCase__ )) + [1] + ([0] * len(lowerCamelCase__ )) + [1]
def __lowerCAmelCase ( self : str ,lowerCamelCase__ : List[int] ):
if len(lowerCamelCase__ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
' eos tokens being added.' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def __lowerCAmelCase ( self : str ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ):
UpperCAmelCase__ = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ):
UpperCAmelCase__ = self._add_eos_if_not_present(lowerCamelCase__ )
if token_ids_a is None:
return token_ids_a
else:
UpperCAmelCase__ = self._add_eos_if_not_present(lowerCamelCase__ )
return token_ids_a + token_ids_a
def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : str ):
UpperCAmelCase__ = [chr(lowerCamelCase__ ) for i in text.encode('utf-8' )]
return tokens
def __lowerCAmelCase ( self : List[Any] ,lowerCamelCase__ : str ):
if token in self.special_tokens_encoder:
UpperCAmelCase__ = self.special_tokens_encoder[token]
elif token in self.added_tokens_encoder:
UpperCAmelCase__ = self.added_tokens_encoder[token]
elif len(lowerCamelCase__ ) != 1:
UpperCAmelCase__ = self.unk_token_id
else:
UpperCAmelCase__ = ord(lowerCamelCase__ ) + self._num_special_tokens
return token_id
def __lowerCAmelCase ( self : Union[str, Any] ,lowerCamelCase__ : Any ):
if index in self.special_tokens_decoder:
UpperCAmelCase__ = self.special_tokens_decoder[index]
else:
UpperCAmelCase__ = chr(index - self._num_special_tokens )
return token
def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : Union[str, Any] ):
UpperCAmelCase__ = b''
for token in tokens:
if token in self.special_tokens_decoder:
UpperCAmelCase__ = self.special_tokens_decoder[token].encode('utf-8' )
elif token in self.added_tokens_decoder:
UpperCAmelCase__ = self.special_tokens_decoder[token].encode('utf-8' )
elif token in self.special_tokens_encoder:
UpperCAmelCase__ = token.encode('utf-8' )
elif token in self.added_tokens_encoder:
UpperCAmelCase__ = token.encode('utf-8' )
else:
UpperCAmelCase__ = bytes([ord(lowerCamelCase__ )] )
bstring += tok_string
UpperCAmelCase__ = bstring.decode('utf-8' ,errors='ignore' )
return string
def __lowerCAmelCase ( self : str ,lowerCamelCase__ : str ,lowerCamelCase__ : Optional[str] = None ):
return ()
| 98 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
lowerCAmelCase__ : int = logging.get_logger(__name__)
lowerCAmelCase__ : Tuple = {
'openai/whisper-base': 'https://huggingface.co/openai/whisper-base/resolve/main/config.json',
}
# fmt: off
lowerCAmelCase__ : str = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377,
1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211,
4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786,
11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791,
17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409,
34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361
]
lowerCAmelCase__ : Optional[Any] = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627,
3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647,
7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793,
14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675,
22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865,
42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362
]
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = "whisper"
snake_case__ = ["past_key_values"]
snake_case__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : str ,lowerCamelCase__ : Tuple=51_865 ,lowerCamelCase__ : Optional[Any]=80 ,lowerCamelCase__ : Tuple=6 ,lowerCamelCase__ : Optional[Any]=4 ,lowerCamelCase__ : int=6 ,lowerCamelCase__ : int=4 ,lowerCamelCase__ : Union[str, Any]=1_536 ,lowerCamelCase__ : Optional[int]=1_536 ,lowerCamelCase__ : str=0.0 ,lowerCamelCase__ : List[Any]=0.0 ,lowerCamelCase__ : Dict=50_257 ,lowerCamelCase__ : Union[str, Any]=True ,lowerCamelCase__ : Dict=True ,lowerCamelCase__ : List[str]="gelu" ,lowerCamelCase__ : Dict=256 ,lowerCamelCase__ : Optional[Any]=0.0 ,lowerCamelCase__ : List[str]=0.0 ,lowerCamelCase__ : List[Any]=0.0 ,lowerCamelCase__ : Union[str, Any]=0.0_2 ,lowerCamelCase__ : Optional[Any]=False ,lowerCamelCase__ : Optional[Any]=1_500 ,lowerCamelCase__ : Union[str, Any]=448 ,lowerCamelCase__ : Tuple=50_256 ,lowerCamelCase__ : List[str]=50_256 ,lowerCamelCase__ : List[Any]=50_256 ,lowerCamelCase__ : Tuple=None ,lowerCamelCase__ : Tuple=[220, 50_256] ,lowerCamelCase__ : List[Any]=False ,lowerCamelCase__ : Dict=256 ,lowerCamelCase__ : List[str]=False ,lowerCamelCase__ : Tuple=0.0_5 ,lowerCamelCase__ : Tuple=10 ,lowerCamelCase__ : str=2 ,lowerCamelCase__ : List[Any]=0.0 ,lowerCamelCase__ : int=10 ,lowerCamelCase__ : Any=0 ,lowerCamelCase__ : Optional[Any]=7 ,**lowerCamelCase__ : Dict ,):
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = num_mel_bins
UpperCAmelCase__ = d_model
UpperCAmelCase__ = encoder_layers
UpperCAmelCase__ = encoder_attention_heads
UpperCAmelCase__ = decoder_layers
UpperCAmelCase__ = decoder_attention_heads
UpperCAmelCase__ = decoder_ffn_dim
UpperCAmelCase__ = encoder_ffn_dim
UpperCAmelCase__ = dropout
UpperCAmelCase__ = attention_dropout
UpperCAmelCase__ = activation_dropout
UpperCAmelCase__ = activation_function
UpperCAmelCase__ = init_std
UpperCAmelCase__ = encoder_layerdrop
UpperCAmelCase__ = decoder_layerdrop
UpperCAmelCase__ = use_cache
UpperCAmelCase__ = encoder_layers
UpperCAmelCase__ = scale_embedding # scale factor will be sqrt(d_model) if True
UpperCAmelCase__ = max_source_positions
UpperCAmelCase__ = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase__ = classifier_proj_size
UpperCAmelCase__ = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase__ = apply_spec_augment
UpperCAmelCase__ = mask_time_prob
UpperCAmelCase__ = mask_time_length
UpperCAmelCase__ = mask_time_min_masks
UpperCAmelCase__ = mask_feature_prob
UpperCAmelCase__ = mask_feature_length
UpperCAmelCase__ = mask_feature_min_masks
UpperCAmelCase__ = median_filter_width
super().__init__(
pad_token_id=lowerCamelCase__ ,bos_token_id=lowerCamelCase__ ,eos_token_id=lowerCamelCase__ ,is_encoder_decoder=lowerCamelCase__ ,decoder_start_token_id=lowerCamelCase__ ,suppress_tokens=lowerCamelCase__ ,begin_suppress_tokens=lowerCamelCase__ ,**lowerCamelCase__ ,)
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
@property
def __lowerCAmelCase ( self : Any ):
UpperCAmelCase__ = OrderedDict(
[
('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
] )
if self.use_past:
UpperCAmelCase__ = {0: 'batch'}
else:
UpperCAmelCase__ = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase__ ,direction='inputs' )
return common_inputs
def __lowerCAmelCase ( self : Any ,lowerCamelCase__ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] ,lowerCamelCase__ : int = -1 ,lowerCamelCase__ : int = -1 ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : Optional["TensorType"] = None ,lowerCamelCase__ : int = 22_050 ,lowerCamelCase__ : float = 5.0 ,lowerCamelCase__ : int = 220 ,):
UpperCAmelCase__ = OrderedDict()
UpperCAmelCase__ = OnnxConfig.generate_dummy_inputs(
self ,preprocessor=preprocessor.feature_extractor ,batch_size=lowerCamelCase__ ,framework=lowerCamelCase__ ,sampling_rate=lowerCamelCase__ ,time_duration=lowerCamelCase__ ,frequency=lowerCamelCase__ ,)
UpperCAmelCase__ = encoder_inputs['input_features'].shape[2]
UpperCAmelCase__ = encoder_sequence_length // 2 if self.use_past else seq_length
UpperCAmelCase__ = super().generate_dummy_inputs(
preprocessor.tokenizer ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
UpperCAmelCase__ = encoder_inputs.pop('input_features' )
UpperCAmelCase__ = decoder_inputs.pop('decoder_input_ids' )
if "past_key_values" in decoder_inputs:
UpperCAmelCase__ = decoder_inputs.pop('past_key_values' )
return dummy_inputs
@property
def __lowerCAmelCase ( self : str ):
return 1e-3
| 98 | """simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class snake_case :
"""simple docstring"""
snake_case__ = 42
snake_case__ = None
snake_case__ = None
lowerCAmelCase__ : Union[str, Any] = namedtuple('CoinsDistribResult', 'moves excess')
def a_ ( lowerCamelCase ):
if root is None:
return 0
# Validation
def count_nodes(lowerCamelCase ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(lowerCamelCase ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(lowerCamelCase ) != count_coins(lowerCamelCase ):
raise ValueError('The nodes number should be same as the number of coins' )
# Main calculation
def get_distrib(lowerCamelCase ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
UpperCAmelCase__ , UpperCAmelCase__ = get_distrib(node.left )
UpperCAmelCase__ , UpperCAmelCase__ = get_distrib(node.right )
UpperCAmelCase__ = 1 - left_distrib_excess
UpperCAmelCase__ = 1 - right_distrib_excess
UpperCAmelCase__ = (
left_distrib_moves
+ right_distrib_moves
+ abs(lowerCamelCase )
+ abs(lowerCamelCase )
)
UpperCAmelCase__ = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(lowerCamelCase , lowerCamelCase )
return get_distrib(lowerCamelCase )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 98 | 1 |
"""simple docstring"""
import doctest
from collections import deque
import numpy as np
class snake_case :
"""simple docstring"""
def __init__( self : Union[str, Any] ):
UpperCAmelCase__ = [2, 1, 2, -1]
UpperCAmelCase__ = [1, 2, 3, 4]
def __lowerCAmelCase ( self : Union[str, Any] ):
UpperCAmelCase__ = len(self.first_signal )
UpperCAmelCase__ = len(self.second_signal )
UpperCAmelCase__ = max(lowerCamelCase__ ,lowerCamelCase__ )
# create a zero matrix of max_length x max_length
UpperCAmelCase__ = [[0] * max_length for i in range(lowerCamelCase__ )]
# fills the smaller signal with zeros to make both signals of same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
for i in range(lowerCamelCase__ ):
UpperCAmelCase__ = deque(self.second_signal )
rotated_signal.rotate(lowerCamelCase__ )
for j, item in enumerate(lowerCamelCase__ ):
matrix[i][j] += item
# multiply the matrix with the first signal
UpperCAmelCase__ = np.matmul(np.transpose(lowerCamelCase__ ) ,np.transpose(self.first_signal ) )
# rounding-off to two decimal places
return [round(lowerCamelCase__ ,2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
| 98 | """simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = (PNDMScheduler,)
snake_case__ = (("num_inference_steps", 50),)
def __lowerCAmelCase ( self : List[str] ,**lowerCamelCase__ : str ):
UpperCAmelCase__ = {
'num_train_timesteps': 1_000,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
}
config.update(**lowerCamelCase__ )
return config
def __lowerCAmelCase ( self : str ,lowerCamelCase__ : Optional[Any]=0 ,**lowerCamelCase__ : List[str] ):
UpperCAmelCase__ = dict(self.forward_default_kwargs )
UpperCAmelCase__ = kwargs.pop('num_inference_steps' ,lowerCamelCase__ )
UpperCAmelCase__ = self.dummy_sample
UpperCAmelCase__ = 0.1 * sample
UpperCAmelCase__ = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ = self.get_scheduler_config(**lowerCamelCase__ )
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals
UpperCAmelCase__ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase__ )
UpperCAmelCase__ = scheduler_class.from_pretrained(lowerCamelCase__ )
new_scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals
UpperCAmelCase__ = dummy_past_residuals[:]
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = new_scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = new_scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __lowerCAmelCase ( self : Tuple ):
pass
def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : List[str]=0 ,**lowerCamelCase__ : Tuple ):
UpperCAmelCase__ = dict(self.forward_default_kwargs )
UpperCAmelCase__ = kwargs.pop('num_inference_steps' ,lowerCamelCase__ )
UpperCAmelCase__ = self.dummy_sample
UpperCAmelCase__ = 0.1 * sample
UpperCAmelCase__ = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ = self.get_scheduler_config()
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase__ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase__ )
UpperCAmelCase__ = scheduler_class.from_pretrained(lowerCamelCase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase__ = dummy_past_residuals[:]
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = new_scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = new_scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __lowerCAmelCase ( self : List[Any] ,**lowerCamelCase__ : int ):
UpperCAmelCase__ = self.scheduler_classes[0]
UpperCAmelCase__ = self.get_scheduler_config(**lowerCamelCase__ )
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
UpperCAmelCase__ = 10
UpperCAmelCase__ = self.dummy_model()
UpperCAmelCase__ = self.dummy_sample_deter
scheduler.set_timesteps(lowerCamelCase__ )
for i, t in enumerate(scheduler.prk_timesteps ):
UpperCAmelCase__ = model(lowerCamelCase__ ,lowerCamelCase__ )
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
UpperCAmelCase__ = model(lowerCamelCase__ ,lowerCamelCase__ )
UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ).prev_sample
return sample
def __lowerCAmelCase ( self : int ):
UpperCAmelCase__ = dict(self.forward_default_kwargs )
UpperCAmelCase__ = kwargs.pop('num_inference_steps' ,lowerCamelCase__ )
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ = self.get_scheduler_config()
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
UpperCAmelCase__ = self.dummy_sample
UpperCAmelCase__ = 0.1 * sample
if num_inference_steps is not None and hasattr(lowerCamelCase__ ,'set_timesteps' ):
scheduler.set_timesteps(lowerCamelCase__ )
elif num_inference_steps is not None and not hasattr(lowerCamelCase__ ,'set_timesteps' ):
UpperCAmelCase__ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase__ = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
UpperCAmelCase__ = dummy_past_residuals[:]
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,0 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,1 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,0 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,1 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
def __lowerCAmelCase ( self : List[Any] ):
for timesteps in [100, 1_000]:
self.check_over_configs(num_train_timesteps=lowerCamelCase__ )
def __lowerCAmelCase ( self : Optional[int] ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowerCamelCase__ )
UpperCAmelCase__ = self.scheduler_classes[0]
UpperCAmelCase__ = self.get_scheduler_config(steps_offset=1 )
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps ,torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) ,)
def __lowerCAmelCase ( self : Dict ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1] ,[0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=lowerCamelCase__ ,beta_end=lowerCamelCase__ )
def __lowerCAmelCase ( self : Union[str, Any] ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCamelCase__ )
def __lowerCAmelCase ( self : List[Any] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase__ )
def __lowerCAmelCase ( self : Optional[Any] ):
for t in [1, 5, 10]:
self.check_over_forward(time_step=lowerCamelCase__ )
def __lowerCAmelCase ( self : List[Any] ):
for t, num_inference_steps in zip([1, 5, 10] ,[10, 50, 100] ):
self.check_over_forward(num_inference_steps=lowerCamelCase__ )
def __lowerCAmelCase ( self : int ):
# earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
UpperCAmelCase__ = 27
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ = self.dummy_sample
UpperCAmelCase__ = 0.1 * sample
UpperCAmelCase__ = self.get_scheduler_config()
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ).prev_sample
def __lowerCAmelCase ( self : int ):
with self.assertRaises(lowerCamelCase__ ):
UpperCAmelCase__ = self.scheduler_classes[0]
UpperCAmelCase__ = self.get_scheduler_config()
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
scheduler.step_plms(self.dummy_sample ,1 ,self.dummy_sample ).prev_sample
def __lowerCAmelCase ( self : Tuple ):
UpperCAmelCase__ = self.full_loop()
UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase__ ) )
UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 1_9_8.1_3_1_8 ) < 1e-2
assert abs(result_mean.item() - 0.2_5_8_0 ) < 1e-3
def __lowerCAmelCase ( self : Tuple ):
UpperCAmelCase__ = self.full_loop(prediction_type='v_prediction' )
UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase__ ) )
UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 6_7.3_9_8_6 ) < 1e-2
assert abs(result_mean.item() - 0.0_8_7_8 ) < 1e-3
def __lowerCAmelCase ( self : Union[str, Any] ):
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase__ = self.full_loop(set_alpha_to_one=lowerCamelCase__ ,beta_start=0.0_1 )
UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase__ ) )
UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 2_3_0.0_3_9_9 ) < 1e-2
assert abs(result_mean.item() - 0.2_9_9_5 ) < 1e-3
def __lowerCAmelCase ( self : Tuple ):
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase__ = self.full_loop(set_alpha_to_one=lowerCamelCase__ ,beta_start=0.0_1 )
UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase__ ) )
UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 1_8_6.9_4_8_2 ) < 1e-2
assert abs(result_mean.item() - 0.2_4_3_4 ) < 1e-3
| 98 | 1 |
"""simple docstring"""
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class snake_case :
"""simple docstring"""
def __init__( self : List[str] ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : Optional[Any]=13 ,lowerCamelCase__ : int=30 ,lowerCamelCase__ : Optional[int]=2 ,lowerCamelCase__ : Union[str, Any]=3 ,lowerCamelCase__ : Optional[int]=True ,lowerCamelCase__ : Dict=True ,lowerCamelCase__ : Tuple=32 ,lowerCamelCase__ : Union[str, Any]=5 ,lowerCamelCase__ : Tuple=4 ,lowerCamelCase__ : str=37 ,lowerCamelCase__ : Union[str, Any]="gelu" ,lowerCamelCase__ : Optional[int]=0.1 ,lowerCamelCase__ : Dict=0.1 ,lowerCamelCase__ : List[str]=10 ,lowerCamelCase__ : int=0.0_2 ,lowerCamelCase__ : Dict=3 ,lowerCamelCase__ : int=None ,lowerCamelCase__ : str=2 ,):
UpperCAmelCase__ = parent
UpperCAmelCase__ = batch_size
UpperCAmelCase__ = image_size
UpperCAmelCase__ = patch_size
UpperCAmelCase__ = num_channels
UpperCAmelCase__ = is_training
UpperCAmelCase__ = use_labels
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = type_sequence_label_size
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = scope
UpperCAmelCase__ = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
UpperCAmelCase__ = (image_size // patch_size) ** 2
UpperCAmelCase__ = num_patches + 2
def __lowerCAmelCase ( self : Tuple ):
UpperCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ = None
if self.use_labels:
UpperCAmelCase__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase__ = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self : Any ):
return DeiTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=lowerCamelCase__ ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : Optional[Any] ):
UpperCAmelCase__ = DeiTModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCAmelCase__ = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self : List[Any] ,lowerCamelCase__ : Dict ,lowerCamelCase__ : str ,lowerCamelCase__ : Any ):
UpperCAmelCase__ = DeiTForMaskedImageModeling(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCAmelCase__ = model(lowerCamelCase__ )
self.parent.assertEqual(
result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase__ = 1
UpperCAmelCase__ = DeiTForMaskedImageModeling(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase__ = model(lowerCamelCase__ )
self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : Any ,lowerCamelCase__ : List[str] ):
UpperCAmelCase__ = self.type_sequence_label_size
UpperCAmelCase__ = DeiTForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCAmelCase__ = model(lowerCamelCase__ ,labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase__ = 1
UpperCAmelCase__ = DeiTForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase__ = model(lowerCamelCase__ ,labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def __lowerCAmelCase ( self : Tuple ):
    # prepare_config_and_inputs_for_common: unpack (config, pixel_values,
    # labels) and return (config, {'pixel_values': ...}) for the test mixins.
    # NOTE(review): every unpacking target is the throwaway
    # ``UpperCAmelCase__`` yet ``config_and_inputs``/``pixel_values``/
    # ``config``/``inputs_dict`` are read below — obfuscation destroyed the
    # original bindings.
    UpperCAmelCase__ = self.prepare_config_and_inputs()
    (
        (
            UpperCAmelCase__
        ) , (
            UpperCAmelCase__
        ) , (
            UpperCAmelCase__
        ) ,
    ) = config_and_inputs
    UpperCAmelCase__ = {'pixel_values': pixel_values}
    return config, inputs_dict
@require_torch
class snake_case ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
    """DeiT model test suite: wires the DeiT classes into the shared
    model/pipeline tester mixins and adds DeiT-specific training checks.

    NOTE(review): obfuscation damage throughout — every method is named
    ``__lowerCAmelCase`` (later defs shadow earlier ones), every class
    attribute is named ``snake_case__``, and locals are bound to
    ``UpperCAmelCase__`` but read under their original names.
    """
    # all model classes under test (originally ``all_model_classes``)
    snake_case__ = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    # pipeline-task -> model-class mapping (originally ``pipeline_model_mapping``)
    snake_case__ = (
        {
            "feature-extraction": DeiTModel,
            "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )
    snake_case__ = False
    snake_case__ = False
    snake_case__ = False

    def __lowerCAmelCase ( self : Optional[int] ):
        # setUp: build the DeiT model tester and the common config tester.
        UpperCAmelCase__ = DeiTModelTester(self )
        UpperCAmelCase__ = ConfigTester(self ,config_class=lowerCamelCase__ ,has_text_modality=lowerCamelCase__ ,hidden_size=37 )

    def __lowerCAmelCase ( self : Optional[Any] ):
        # Run the shared config sanity checks.
        self.config_tester.run_common_tests()

    @unittest.skip(reason='DeiT does not use inputs_embeds' )
    def __lowerCAmelCase ( self : str ):
        pass

    def __lowerCAmelCase ( self : Tuple ):
        # Input embeddings must be an nn.Module; output embeddings, if any,
        # must be an nn.Linear.
        UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase__ = model_class(lowerCamelCase__ )
            self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
            UpperCAmelCase__ = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(lowerCamelCase__ ,nn.Linear ) )

    def __lowerCAmelCase ( self : str ):
        # forward() must accept ``pixel_values`` as its first argument.
        UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase__ = model_class(lowerCamelCase__ )
            UpperCAmelCase__ = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCAmelCase__ = [*signature.parameters.keys()]
            UpperCAmelCase__ = ['pixel_values']
            self.assertListEqual(arg_names[:1] ,lowerCamelCase__ )

    def __lowerCAmelCase ( self : Optional[int] ):
        UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCamelCase__ )

    def __lowerCAmelCase ( self : Tuple ):
        UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase__ )

    def __lowerCAmelCase ( self : Tuple ):
        UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )

    def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : List[str]=False ):
        # The teacher-distilled classifier is inference-only, so drop labels.
        UpperCAmelCase__ = super()._prepare_for_class(lowerCamelCase__ ,lowerCamelCase__ ,return_labels=lowerCamelCase__ )
        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict

    def __lowerCAmelCase ( self : List[str] ):
        # Training smoke test: loss must backpropagate for trainable classes.
        if not self.model_tester.is_training:
            return
        UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCAmelCase__ = True
        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(lowerCamelCase__ )
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            UpperCAmelCase__ = model_class(lowerCamelCase__ )
            model.to(lowerCamelCase__ )
            model.train()
            UpperCAmelCase__ = self._prepare_for_class(lowerCamelCase__ ,lowerCamelCase__ ,return_labels=lowerCamelCase__ )
            UpperCAmelCase__ = model(**lowerCamelCase__ ).loss
            loss.backward()

    def __lowerCAmelCase ( self : Union[str, Any] ):
        # Same training smoke test, with gradient checkpointing enabled.
        UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        UpperCAmelCase__ = False
        UpperCAmelCase__ = True
        for model_class in self.all_model_classes:
            if model_class in get_values(lowerCamelCase__ ) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            UpperCAmelCase__ = model_class(lowerCamelCase__ )
            model.gradient_checkpointing_enable()
            model.to(lowerCamelCase__ )
            model.train()
            UpperCAmelCase__ = self._prepare_for_class(lowerCamelCase__ ,lowerCamelCase__ ,return_labels=lowerCamelCase__ )
            UpperCAmelCase__ = model(**lowerCamelCase__ ).loss
            loss.backward()

    def __lowerCAmelCase ( self : List[str] ):
        # Problem-type matrix: multi-label / single-label classification and
        # regression must train without target-size broadcast warnings.
        UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCAmelCase__ = [
            {'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float},
            {'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long},
            {'title': 'regression', 'num_labels': 1, 'dtype': torch.float},
        ]
        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(lowerCamelCase__ ),
                    *get_values(lowerCamelCase__ ),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            for problem_type in problem_types:
                with self.subTest(msg=f'''Testing {model_class} with {problem_type["title"]}''' ):
                    UpperCAmelCase__ = problem_type['title']
                    UpperCAmelCase__ = problem_type['num_labels']
                    UpperCAmelCase__ = model_class(lowerCamelCase__ )
                    model.to(lowerCamelCase__ )
                    model.train()
                    UpperCAmelCase__ = self._prepare_for_class(lowerCamelCase__ ,lowerCamelCase__ ,return_labels=lowerCamelCase__ )
                    if problem_type["num_labels"] > 1:
                        UpperCAmelCase__ = inputs['labels'].unsqueeze(1 ).repeat(1 ,problem_type['num_labels'] )
                    UpperCAmelCase__ = inputs['labels'].to(problem_type['dtype'] )
                    # This tests that we do not trigger the warning form PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something in wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=lowerCamelCase__ ) as warning_list:
                        UpperCAmelCase__ = model(**lowerCamelCase__ ).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message ):
                            raise ValueError(
                                f'''Something is going wrong in the regression problem: intercepted {w.message}''' )
                    loss.backward()

    @slow
    def __lowerCAmelCase ( self : int ):
        # Smoke-test from_pretrained on the first public checkpoint.
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase__ = DeiTModel.from_pretrained(lowerCamelCase__ )
            self.assertIsNotNone(lowerCamelCase__ )
def a_ ( ):
    """Load the standard COCO cats fixture image used by the vision tests.

    Fix: the opened image was bound to the throwaway name
    ``UpperCAmelCase__`` while the undefined name ``image`` was returned
    (NameError at runtime).
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class snake_case ( unittest.TestCase ):
    """Slow integration tests: run real DeiT checkpoints on the COCO fixture
    image and compare the produced logits against recorded values.

    NOTE(review): locals are bound to ``UpperCAmelCase__`` but read under
    their original names (``model``, ``image_processor``, ``inputs``,
    ``outputs``) — obfuscation damage, as elsewhere in the file.
    """
    @cached_property
    def __lowerCAmelCase ( self : Tuple ):
        # default_image_processor for the distilled checkpoint (None when the
        # vision extras are unavailable).
        return (
            DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' )
            if is_vision_available()
            else None
        )

    @slow
    def __lowerCAmelCase ( self : Optional[int] ):
        # End-to-end logits check against recorded reference values.
        UpperCAmelCase__ = DeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' ).to(
            lowerCamelCase__ )
        UpperCAmelCase__ = self.default_image_processor
        UpperCAmelCase__ = prepare_img()
        UpperCAmelCase__ = image_processor(images=lowerCamelCase__ ,return_tensors='pt' ).to(lowerCamelCase__ )
        # forward pass
        with torch.no_grad():
            UpperCAmelCase__ = model(**lowerCamelCase__ )
        # verify the logits
        UpperCAmelCase__ = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape ,lowerCamelCase__ )
        UpperCAmelCase__ = torch.tensor([-1.0_2_6_6, 0.1_9_1_2, -1.2_8_6_1] ).to(lowerCamelCase__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowerCamelCase__ ,atol=1e-4 ) )

    @slow
    @require_accelerate
    @require_torch_gpu
    def __lowerCAmelCase ( self : List[str] ):
        # fp16 inference smoke test using accelerate's device_map.
        UpperCAmelCase__ = DeiTModel.from_pretrained(
            'facebook/deit-base-distilled-patch16-224' ,torch_dtype=torch.floataa ,device_map='auto' )
        UpperCAmelCase__ = self.default_image_processor
        UpperCAmelCase__ = prepare_img()
        UpperCAmelCase__ = image_processor(images=lowerCamelCase__ ,return_tensors='pt' )
        UpperCAmelCase__ = inputs.pixel_values.to(lowerCamelCase__ )
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            UpperCAmelCase__ = model(lowerCamelCase__ )
# ---- dataset-row separator artifact (was: "| 98 |") ----
"""simple docstring"""
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
lowerCAmelCase__ : Optional[Any] = logging.getLogger()
def a_ ( ):
    """Return the file path passed via the ``-f`` flag.

    pytest injects ``-f <file>`` when collecting this module; parsing it here
    keeps argparse from choking on the unknown flag.

    Fix: the parser and the parsed namespace were bound to the throwaway name
    ``UpperCAmelCase__`` while the undefined names ``parser``/``args`` were
    read (NameError at runtime).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-f' )
    args = parser.parse_args()
    return args.f
class snake_case ( __UpperCAmelCase ):
    """Slow end-to-end tests for the DeeBERT research example: run
    ``run_glue_deebert.py`` on the MRPC fixture data (two-stage training,
    per-highway evaluation, entropy-thresholded early exit) and require every
    reported metric to clear 0.666.

    NOTE(review): obfuscation damage — locals bound to ``UpperCAmelCase__``
    are read under their original names (``args``, ``result``), and
    ``patch.object`` receives the method's argument where ``sys`` was meant.
    """
    def __lowerCAmelCase ( self : int ):
        # setup(): mirror script logs to stdout so failures are debuggable.
        UpperCAmelCase__ = logging.StreamHandler(sys.stdout )
        logger.addHandler(lowerCamelCase__ )

    def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : Union[str, Any] ):
        # run_and_check(args): run the script in-process (single-GPU only) and
        # assert every reported metric is >= 0.666.
        UpperCAmelCase__ = get_gpu_count()
        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0 ,'run_glue_deebert.py' )
            with patch.object(lowerCamelCase__ ,'argv' ,lowerCamelCase__ ):
                UpperCAmelCase__ = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(lowerCamelCase__ ,0.6_6_6 )

    @slow
    @require_torch_non_multi_gpu
    def __lowerCAmelCase ( self : List[str] ):
        # Three stages: two-stage training, per-highway evaluation, and
        # entropy-thresholded early-exit evaluation.
        UpperCAmelCase__ = '\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n '.split()
        self.run_and_check(lowerCamelCase__ )
        UpperCAmelCase__ = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
        self.run_and_check(lowerCamelCase__ )
        UpperCAmelCase__ = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
        self.run_and_check(lowerCamelCase__ )
# ---- dataset-row separator artifact (was: "| 98 | 1 |") ----
"""simple docstring"""
from __future__ import annotations
from math import ceil, floor, sqrt
def a_ ( lowerCamelCase = 2_0_0_0_0_0_0 ):
    """Find the grid area whose rectangle count is closest to the target.

    An ``a x b`` grid contains ``T(a) * T(b)`` axis-aligned rectangles, where
    ``T(n) = n * (n + 1) / 2`` is the n-th triangle number.  Returns the area
    ``a * b`` of the grid whose count is nearest to ``lowerCamelCase``
    (Project Euler problem 85 for the default target of 2,000,000).

    Fixes vs. the original: the body referenced the undefined name ``target``
    instead of the parameter (NameError at call time), and carried several
    dead ``UpperCAmelCase__ = 42`` placeholder assignments.
    """
    # T(0) .. T(k) with k comfortably past sqrt(2 * target), so every
    # candidate index used below stays in range.
    triangle_numbers = [0]
    for idx in range(1 , ceil(sqrt(lowerCamelCase * 2 ) * 1.1 ) ):
        triangle_numbers.append(triangle_numbers[-1] + idx )
    best_product = 0  # rectangle count closest to the target so far
    area = 0  # grid area achieving best_product
    for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
        # Solve T(b) * triangle_a == target for b with the quadratic formula,
        # then try the integers on either side of the real-valued estimate.
        b_estimate = (-1 + sqrt(1 + 8 * lowerCamelCase / triangle_a )) / 2
        b_floor = floor(b_estimate )
        b_ceil = ceil(b_estimate )
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]
        if abs(lowerCamelCase - triangle_b_first_guess * triangle_a ) < abs(
            lowerCamelCase - best_product ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor
        if abs(lowerCamelCase - triangle_b_second_guess * triangle_a ) < abs(
            lowerCamelCase - best_product ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil
    return area
if __name__ == "__main__":
    # Fix NameError: the solver was renamed to ``a_`` by the obfuscation but
    # the guard still called the undefined name ``solution``.
    print(F"""{a_() = }""")
# ---- dataset-row separator artifact (was: "| 98 |") ----
"""simple docstring"""
import argparse
lowerCAmelCase__ : List[str] = 'docs/source/_static/js/custom.js'
def a_ ( lowerCamelCase , lowerCamelCase_path='docs/source/_static/js/custom.js' ):
    """Pin the docs version-switcher JS to release version ``lowerCamelCase``.

    Rewrites the ``const stableVersion`` line and appends the new version to
    the ``versionMapping`` table inside *lowerCamelCase_path* (defaults to the
    repository's custom.js, so existing single-argument callers still work).

    Fixes vs. the original: the version string itself was opened as a file,
    the f-strings read the undefined name ``version``, and the rewritten
    stable-version line was bound to a throwaway name instead of being
    written back into ``lines``.
    """
    with open(lowerCamelCase_path , encoding='utf-8' , newline='\n' ) as f:
        lines = f.readlines()
    index = 0
    # First let's put the right version
    while not lines[index].startswith('const stableVersion =' ):
        index += 1
    lines[index] = f'''const stableVersion = "v{lowerCamelCase}"\n'''
    # Then update the dictionary
    while not lines[index].startswith('const versionMapping = {' ):
        index += 1
    # We go until the end of the mapping block
    while not lines[index].startswith('}' ):
        index += 1
    # We add the new version just before the closing brace
    lines[index - 1] += f''' "v{lowerCamelCase}": "v{lowerCamelCase}",\n'''
    with open(lowerCamelCase_path , 'w' , encoding='utf-8' , newline='\n' ) as f:
        f.writelines(lines )
if __name__ == "__main__":
    # Parse the release version and update the doc-switcher JS in place.
    # Fixes vs. the original: it rebound the module-level name holding the JS
    # file path to the ArgumentParser (clobbering it) and then called the
    # undefined name ``update_custom_js``; use fresh locals and call ``a_``.
    parser = argparse.ArgumentParser()
    parser.add_argument('--version', help='Release version.')
    args = parser.parse_args()
    a_(args.version)
# ---- dataset-row separator artifact (was: "| 98 | 1 |") ----
"""simple docstring"""
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def _strictly_diagonally_dominant(table):
    """Raise ValueError unless every diagonal entry of the augmented matrix
    ``[A | b]`` exceeds the sum of the other coefficients in its row."""
    rows, cols = table.shape
    for i in range(rows):
        total = 0
        for j in range(cols - 1):
            if i == j:
                continue
            total += table[i][j]
        if table[i][i] <= total:
            raise ValueError('Coefficient matrix is not strictly diagonally dominant' )
    return True


def a_ ( coefficient_matrix , constant_matrix , init_val , iterations ):
    """Approximate the solution of ``A x = b`` with Jacobi iteration.

    Args:
        coefficient_matrix: square ``(n, n)`` numpy array ``A``.
        constant_matrix: ``(n, 1)`` numpy array ``b``.
        init_val: length-``n`` sequence holding the initial guess for ``x``.
        iterations: positive number of Jacobi sweeps to run.

    Returns:
        The approximate solution as a list of ``n`` floats.

    Raises:
        ValueError: on inconsistent shapes, a non-positive iteration count, or
            a coefficient matrix that is not strictly diagonally dominant.

    Fixes vs. the obfuscated original: all four parameters shared one name (a
    SyntaxError), the dominance check called the undefined name
    ``strictly_diagonally_dominant`` (now a private helper above), and the
    final conversion called ``float`` on the wrong variable.
    """
    rows_a, cols_a = coefficient_matrix.shape
    rows_b, cols_b = constant_matrix.shape
    if rows_a != cols_a:
        raise ValueError(
            f'Coefficient matrix dimensions must be nxn but received {rows_a}x{cols_a}' )
    if cols_b != 1:
        raise ValueError(f'Constant matrix must be nx1 but received {rows_b}x{cols_b}' )
    if rows_a != rows_b:
        raise ValueError(
            'Coefficient and constant matrices dimensions must be nxn and nx1 but '
            f'received {rows_a}x{cols_a} and {rows_b}x{cols_b}' )
    if len(init_val ) != rows_a:
        raise ValueError(
            'Number of initial values must be equal to number of rows in coefficient '
            f'matrix but received {len(init_val )} and {rows_a}' )
    if iterations <= 0:
        raise ValueError('Iterations must be at least 1' )
    # Work on the augmented matrix [A | b]; last column is the constant term.
    table = np.concatenate((coefficient_matrix, constant_matrix) , axis=1 )
    rows, cols = table.shape
    _strictly_diagonally_dominant(table )
    # Jacobi sweep: x_i <- (b_i - sum_{j != i} A_ij * x_j) / A_ii
    for _ in range(iterations ):
        new_val = []
        for row in range(rows ):
            temp = 0
            for col in range(cols ):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            new_val.append((temp + val) / denom )
        init_val = new_val
    return [float(i ) for i in new_val]
def a_ ( lowerCamelCase ):
    """Check that the augmented matrix ``lowerCamelCase`` (``[A | b]``, shape
    ``n x n+1``) has a strictly diagonally dominant coefficient part.

    Returns True when dominant; raises ValueError otherwise.

    Fix: the body read the undefined name ``table`` and discarded its
    unpacked shape and running row sum into throwaway names (NameError at
    runtime).
    """
    rows, cols = lowerCamelCase.shape
    is_diagonally_dominant = True
    for i in range(0 , rows ):
        total = 0
        for j in range(0 , cols - 1 ):
            if i == j:
                continue
            else:
                total += lowerCamelCase[i][j]
        # Strict dominance: the diagonal entry must exceed the row sum of the
        # remaining coefficients (the constant column is excluded above).
        if lowerCamelCase[i][i] <= total:
            raise ValueError('Coefficient matrix is not strictly diagonally dominant' )
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
    # Run the module doctests when executed directly as a script.
    import doctest
    doctest.testmod()
# ---- dataset-row separator artifact (was: "| 98 |") ----
"""simple docstring"""
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def a_ ( lowerCamelCase ):
    """Return the squared Euclidean norm of ``lowerCamelCase`` (i.e. v . v)."""
    vector = lowerCamelCase
    return np.dot(vector , vector )
class snake_case :
    """Support-vector classifier trained by minimizing the negated Wolfe dual
    with ``scipy.optimize.minimize``; supports linear and RBF kernels.

    NOTE(review): heavy obfuscation damage — ``__init__``'s keyword-only
    parameters all share one name (duplicate arguments are a SyntaxError),
    the bodies read the original names (``regularization``, ``kernel``,
    ``gamma``, ``observations``, ``classes``, ``n``, ``s``) that are never
    bound, all three methods after ``__init__`` share the name
    ``__lowerCAmelCase``, and the RBF kernel calls the module helper by its
    old name ``norm_squared``.
    """
    def __init__( self : int ,*,
        lowerCamelCase__ : float = np.inf ,lowerCamelCase__ : str = "linear" ,lowerCamelCase__ : float = 0.0 ,):
        # Store regularization/gamma and select the kernel implementation;
        # the RBF kernel requires a positive numeric gamma.
        UpperCAmelCase__ = regularization
        UpperCAmelCase__ = gamma
        if kernel == "linear":
            UpperCAmelCase__ = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError('rbf kernel requires gamma' )
            if not isinstance(self.gamma ,(float, int) ):
                raise ValueError('gamma must be float or int' )
            if not self.gamma > 0:
                raise ValueError('gamma must be > 0' )
            UpperCAmelCase__ = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklear: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            UpperCAmelCase__ = f'''Unknown kernel: {kernel}'''
            raise ValueError(lowerCamelCase__ )

    def __lowerCAmelCase ( self : List[Any] ,lowerCamelCase__ : ndarray ,lowerCamelCase__ : ndarray ):
        # Linear kernel: plain dot product of the two vectors.
        return np.dot(lowerCamelCase__ ,lowerCamelCase__ )

    def __lowerCAmelCase ( self : Any ,lowerCamelCase__ : ndarray ,lowerCamelCase__ : ndarray ):
        # RBF kernel: exp(-gamma * ||v1 - v2||^2).
        return np.exp(-(self.gamma * norm_squared(vectora - vectora )) )

    def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : list[ndarray] ,lowerCamelCase__ : ndarray ):
        # fit(observations, classes): solve the dual for the multipliers
        # l_star, then estimate the offset b as the mean residual.
        UpperCAmelCase__ = observations
        UpperCAmelCase__ = classes
        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        # constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        # constraint: self.C >= ln >= 0
        # and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations
        ((UpperCAmelCase__) , ) = np.shape(lowerCamelCase__ )
        def to_minimize(lowerCamelCase__ : ndarray ) -> float:
            # Negated dual objective: 1/2 ΣΣ l_i l_j y_i y_j K(x_i, x_j) - Σ l_i.
            UpperCAmelCase__ = 0
            ((UpperCAmelCase__) , ) = np.shape(lowerCamelCase__ )
            for i in range(lowerCamelCase__ ):
                for j in range(lowerCamelCase__ ):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i] ,observations[j] )
                    )
            return 1 / 2 * s - sum(lowerCamelCase__ )
        UpperCAmelCase__ = LinearConstraint(lowerCamelCase__ ,0 ,0 )
        UpperCAmelCase__ = Bounds(0 ,self.regularization )
        UpperCAmelCase__ = minimize(
            lowerCamelCase__ ,np.ones(lowerCamelCase__ ) ,bounds=lowerCamelCase__ ,constraints=[ly_contraint] ).x
        UpperCAmelCase__ = l_star
        # calculating mean offset of separation plane to points
        UpperCAmelCase__ = 0
        for i in range(lowerCamelCase__ ):
            for j in range(lowerCamelCase__ ):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i] ,observations[j] )
        UpperCAmelCase__ = s / n

    def __lowerCAmelCase ( self : int ,lowerCamelCase__ : ndarray ):
        # predict(observation): sign of Σ l_n y_n K(x_n, x) + offset.
        UpperCAmelCase__ = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n] ,lowerCamelCase__ )
            for n in range(len(self.classes ) ) )
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
    # Run the module doctests when executed directly as a script.
    import doctest
    doctest.testmod()
# ---- dataset-row separator artifact (was: "| 98 | 1 |") ----
"""simple docstring"""
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def a_ ( lowerCamelCase ):
    """Return ``lowerCamelCase + 2`` (toy tool for the interpreter tests).

    Fix: the body returned ``x + 2`` but the parameter had been renamed to
    ``lowerCamelCase``, so every call raised NameError.
    """
    return lowerCamelCase + 2
class snake_case ( unittest.TestCase ):
    """Unit tests for ``transformers.tools.python_interpreter.evaluate``:
    assignments, tool calls, dict/list literals, multi-line programs,
    f-strings, conditionals, subscripting and for-loops.

    NOTE(review): obfuscation damage — every method is named
    ``__lowerCAmelCase`` (later defs shadow earlier ones) and locals are
    bound to ``UpperCAmelCase__`` but then passed as ``lowerCamelCase__``,
    which is undefined inside these zero-argument methods.
    """
    def __lowerCAmelCase ( self : int ):
        # evaluate() on simple assignments: returns the value of the last
        # assignment and mutates the state dict in place.
        UpperCAmelCase__ = 'x = 3'
        UpperCAmelCase__ = {}
        UpperCAmelCase__ = evaluate(lowerCamelCase__ ,{} ,state=lowerCamelCase__ )
        assert result == 3
        self.assertDictEqual(lowerCamelCase__ ,{'x': 3} )
        UpperCAmelCase__ = 'x = y'
        UpperCAmelCase__ = {'y': 5}
        UpperCAmelCase__ = evaluate(lowerCamelCase__ ,{} ,state=lowerCamelCase__ )
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(lowerCamelCase__ ,{'x': 5, 'y': 5} )

    def __lowerCAmelCase ( self : Dict ):
        # Tool calls resolve only through the tools mapping; without the tool
        # the interpreter reports the failed call on stdout and returns None.
        UpperCAmelCase__ = 'y = add_two(x)'
        UpperCAmelCase__ = {'x': 3}
        UpperCAmelCase__ = evaluate(lowerCamelCase__ ,{'add_two': add_two} ,state=lowerCamelCase__ )
        assert result == 5
        self.assertDictEqual(lowerCamelCase__ ,{'x': 3, 'y': 5} )
        # Won't work without the tool
        with CaptureStdout() as out:
            UpperCAmelCase__ = evaluate(lowerCamelCase__ ,{} ,state=lowerCamelCase__ )
        assert result is None
        assert "tried to execute add_two" in out.out

    def __lowerCAmelCase ( self : str ):
        # Minimal single-statement program.
        UpperCAmelCase__ = 'x = 3'
        UpperCAmelCase__ = {}
        UpperCAmelCase__ = evaluate(lowerCamelCase__ ,{} ,state=lowerCamelCase__ )
        assert result == 3
        self.assertDictEqual(lowerCamelCase__ ,{'x': 3} )

    def __lowerCAmelCase ( self : Optional[int] ):
        # Dict literals may mix state lookups and tool calls.
        UpperCAmelCase__ = 'test_dict = {\'x\': x, \'y\': add_two(x)}'
        UpperCAmelCase__ = {'x': 3}
        UpperCAmelCase__ = evaluate(lowerCamelCase__ ,{'add_two': add_two} ,state=lowerCamelCase__ )
        self.assertDictEqual(lowerCamelCase__ ,{'x': 3, 'y': 5} )
        self.assertDictEqual(lowerCamelCase__ ,{'x': 3, 'test_dict': {'x': 3, 'y': 5}} )

    def __lowerCAmelCase ( self : Tuple ):
        # Multi-line programs: last assignment wins.
        UpperCAmelCase__ = 'x = 3\ny = 5'
        UpperCAmelCase__ = {}
        UpperCAmelCase__ = evaluate(lowerCamelCase__ ,{} ,state=lowerCamelCase__ )
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(lowerCamelCase__ ,{'x': 3, 'y': 5} )

    def __lowerCAmelCase ( self : Optional[Any] ):
        # f-strings interpolate from the interpreter state.
        UpperCAmelCase__ = 'text = f\'This is x: {x}.\''
        UpperCAmelCase__ = {'x': 3}
        UpperCAmelCase__ = evaluate(lowerCamelCase__ ,{} ,state=lowerCamelCase__ )
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(lowerCamelCase__ ,{'x': 3, 'text': 'This is x: 3.'} )

    def __lowerCAmelCase ( self : str ):
        # if/else branches evaluate against the current state.
        UpperCAmelCase__ = 'if x <= 3:\n    y = 2\nelse:\n    y = 5'
        UpperCAmelCase__ = {'x': 3}
        UpperCAmelCase__ = evaluate(lowerCamelCase__ ,{} ,state=lowerCamelCase__ )
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(lowerCamelCase__ ,{'x': 3, 'y': 2} )
        UpperCAmelCase__ = {'x': 8}
        UpperCAmelCase__ = evaluate(lowerCamelCase__ ,{} ,state=lowerCamelCase__ )
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(lowerCamelCase__ ,{'x': 8, 'y': 5} )

    def __lowerCAmelCase ( self : Optional[int] ):
        # List literals may mix state lookups and tool calls.
        UpperCAmelCase__ = 'test_list = [x, add_two(x)]'
        UpperCAmelCase__ = {'x': 3}
        UpperCAmelCase__ = evaluate(lowerCamelCase__ ,{'add_two': add_two} ,state=lowerCamelCase__ )
        self.assertListEqual(lowerCamelCase__ ,[3, 5] )
        self.assertDictEqual(lowerCamelCase__ ,{'x': 3, 'test_list': [3, 5]} )

    def __lowerCAmelCase ( self : str ):
        # Plain name lookup from state.
        UpperCAmelCase__ = 'y = x'
        UpperCAmelCase__ = {'x': 3}
        UpperCAmelCase__ = evaluate(lowerCamelCase__ ,{} ,state=lowerCamelCase__ )
        assert result == 3
        self.assertDictEqual(lowerCamelCase__ ,{'x': 3, 'y': 3} )

    def __lowerCAmelCase ( self : Tuple ):
        # Subscripting lists and dicts built earlier in the program.
        UpperCAmelCase__ = 'test_list = [x, add_two(x)]\ntest_list[1]'
        UpperCAmelCase__ = {'x': 3}
        UpperCAmelCase__ = evaluate(lowerCamelCase__ ,{'add_two': add_two} ,state=lowerCamelCase__ )
        assert result == 5
        self.assertDictEqual(lowerCamelCase__ ,{'x': 3, 'test_list': [3, 5]} )
        UpperCAmelCase__ = 'test_dict = {\'x\': x, \'y\': add_two(x)}\ntest_dict[\'y\']'
        UpperCAmelCase__ = {'x': 3}
        UpperCAmelCase__ = evaluate(lowerCamelCase__ ,{'add_two': add_two} ,state=lowerCamelCase__ )
        assert result == 5
        self.assertDictEqual(lowerCamelCase__ ,{'x': 3, 'test_dict': {'x': 3, 'y': 5}} )

    def __lowerCAmelCase ( self : int ):
        # for-loops: the loop variable and final assignment land in state.
        UpperCAmelCase__ = 'x = 0\nfor i in range(3):\n    x = i'
        UpperCAmelCase__ = {}
        UpperCAmelCase__ = evaluate(lowerCamelCase__ ,{'range': range} ,state=lowerCamelCase__ )
        assert result == 2
        self.assertDictEqual(lowerCamelCase__ ,{'x': 2, 'i': 2} )
# ---- dataset-row separator artifact (was: "| 98 |") ----
"""simple docstring"""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
lowerCAmelCase__ : List[Any] = '\\n\n'
lowerCAmelCase__ : Tuple = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
lowerCAmelCase__ : str = '\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... 
input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case ( datasets.Metric ):
    """Perplexity metric: the exponentiated mean negative log-likelihood of
    each input text under a causal LM, computed batch-wise on CPU or GPU.

    NOTE(review): obfuscation damage — the compute method's parameters share
    the name ``lowerCamelCase__`` (duplicate arguments are a SyntaxError) and
    locals are bound to ``UpperCAmelCase__`` but read under their original
    names (``model``, ``tokenizer``, ``encodings``, ``ppls`` ...).
    """
    def __lowerCAmelCase ( self : str ):
        # _info(): declare the expected input feature schema for the metric.
        return datasets.MetricInfo(
            description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
                {
                    'input_texts': datasets.Value('string' ),
                } ) ,reference_urls=['https://huggingface.co/docs/transformers/perplexity'] ,)

    def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : int = 16 ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : List[str]=None ):
        # _compute(model_id, input_texts, batch_size, add_start_token, device)
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                UpperCAmelCase__ = 'cuda'
        else:
            UpperCAmelCase__ = 'cuda' if torch.cuda.is_available() else 'cpu'
        UpperCAmelCase__ = AutoModelForCausalLM.from_pretrained(lowerCamelCase__ )
        UpperCAmelCase__ = model.to(lowerCamelCase__ )
        UpperCAmelCase__ = AutoTokenizer.from_pretrained(lowerCamelCase__ )
        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            UpperCAmelCase__ = list(tokenizer.special_tokens_map_extended.values() )
            # check that the model already has at least one special token defined
            assert (
                len(lowerCamelCase__ ) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({'pad_token': existing_special_tokens[0]} )
        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            UpperCAmelCase__ = model.config.max_length - 1
        else:
            UpperCAmelCase__ = model.config.max_length
        UpperCAmelCase__ = tokenizer(
            lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ,padding=lowerCamelCase__ ,truncation=lowerCamelCase__ ,max_length=lowerCamelCase__ ,return_tensors='pt' ,return_attention_mask=lowerCamelCase__ ,).to(lowerCamelCase__ )
        UpperCAmelCase__ = encodings['input_ids']
        UpperCAmelCase__ = encodings['attention_mask']
        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1 ) ,1 ) ), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1 ) ,2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
        UpperCAmelCase__ = []
        UpperCAmelCase__ = CrossEntropyLoss(reduction='none' )
        # Batched scoring: shift logits/labels by one, mask padding, and
        # exponentiate the per-sequence mean NLL.
        for start_index in logging.tqdm(range(0 ,len(lowerCamelCase__ ) ,lowerCamelCase__ ) ):
            UpperCAmelCase__ = min(start_index + batch_size ,len(lowerCamelCase__ ) )
            UpperCAmelCase__ = encoded_texts[start_index:end_index]
            UpperCAmelCase__ = attn_masks[start_index:end_index]
            if add_start_token:
                UpperCAmelCase__ = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(lowerCamelCase__ )
                UpperCAmelCase__ = torch.cat([bos_tokens_tensor, encoded_batch] ,dim=1 )
                UpperCAmelCase__ = torch.cat(
                    [torch.ones(bos_tokens_tensor.size() ,dtype=torch.intaa ).to(lowerCamelCase__ ), attn_mask] ,dim=1 )
            UpperCAmelCase__ = encoded_batch
            with torch.no_grad():
                UpperCAmelCase__ = model(lowerCamelCase__ ,attention_mask=lowerCamelCase__ ).logits
            UpperCAmelCase__ = out_logits[..., :-1, :].contiguous()
            UpperCAmelCase__ = labels[..., 1:].contiguous()
            UpperCAmelCase__ = attn_mask[..., 1:].contiguous()
            UpperCAmelCase__ = torch.expa(
                (loss_fct(shift_logits.transpose(1 ,2 ) ,lowerCamelCase__ ) * shift_attention_mask_batch).sum(1 )
                / shift_attention_mask_batch.sum(1 ) )
            ppls += perplexity_batch.tolist()
        return {"perplexities": ppls, "mean_perplexity": np.mean(lowerCamelCase__ )}
# ---- dataset-row separator artifact (was: "| 98 | 1 |") ----
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class snake_case ( unittest.TestCase ):
    """Fast CPU tests for KarrasVePipeline using a tiny seeded UNet2DModel.

    NOTE(review): locals are bound to ``UpperCAmelCase__`` but read under
    their original names (``model``, ``pipe``, ``image`` ...) — obfuscation
    damage, as elsewhere in the file.
    """
    @property
    def __lowerCAmelCase ( self : List[str] ):
        # dummy_uncond_unet: tiny deterministic (seed 0) unconditional UNet.
        torch.manual_seed(0 )
        UpperCAmelCase__ = UNetaDModel(
            block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=('DownBlock2D', 'AttnDownBlock2D') ,up_block_types=('AttnUpBlock2D', 'UpBlock2D') ,)
        return model

    def __lowerCAmelCase ( self : Dict ):
        # Two-step sampling must be reproducible under a fixed seed and match
        # the recorded slice, with and without return_dict.
        UpperCAmelCase__ = self.dummy_uncond_unet
        UpperCAmelCase__ = KarrasVeScheduler()
        UpperCAmelCase__ = KarrasVePipeline(unet=lowerCamelCase__ ,scheduler=lowerCamelCase__ )
        pipe.to(lowerCamelCase__ )
        pipe.set_progress_bar_config(disable=lowerCamelCase__ )
        UpperCAmelCase__ = torch.manual_seed(0 )
        UpperCAmelCase__ = pipe(num_inference_steps=2 ,generator=lowerCamelCase__ ,output_type='numpy' ).images
        UpperCAmelCase__ = torch.manual_seed(0 )
        UpperCAmelCase__ = pipe(num_inference_steps=2 ,generator=lowerCamelCase__ ,output_type='numpy' ,return_dict=lowerCamelCase__ )[0]
        UpperCAmelCase__ = image[0, -3:, -3:, -1]
        UpperCAmelCase__ = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        UpperCAmelCase__ = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class snake_case(unittest.TestCase):
    """Slow integration test: full 20-step Karras-Ve sampling with a pretrained UNet."""

    def test_karras_ve_pipeline(self):
        model_id = 'google/ncsnpp-celebahq-256'
        model = UNetaDModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type='numpy').images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 98 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
logger = logging.get_logger(__name__)

# Map of released checkpoints to their hosted config files.
XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/config.json',
    # See all XGLM models at https://huggingface.co/models?filter=xglm
}
class snake_case(PretrainedConfig):
    """
    Configuration class for XGLM models.

    Instantiating with the defaults yields a configuration similar to
    facebook/xglm-564M. Token-id kwargs are forwarded to `PretrainedConfig`.
    """

    model_type = "xglm"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Map generic config attribute names onto XGLM-specific field names.
    attribute_map = {
        "num_attention_heads": "attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=256_008,
        max_position_embeddings=2_048,
        d_model=1_024,
        ffn_dim=4_096,
        num_layers=24,
        attention_heads=16,
        activation_function="gelu",
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        layerdrop=0.0,
        init_std=0.02,
        scale_embedding=True,
        use_cache=True,
        decoder_start_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 98 | 1 |
"""simple docstring"""
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
lowerCAmelCase__ : str = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
lowerCAmelCase__ : List[str] = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
lowerCAmelCase__ : Optional[Any] = r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class snake_case(datasets.Metric):
    """Accuracy metric for the MATH dataset, canonicalizing LaTeX answers before comparison."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string'),
                    'references': datasets.Value('string'),
                }
            ),
            homepage='https://github.com/hendrycks/math',
            codebase_urls=['https://github.com/hendrycks/math'],
        )

    def _compute(self, references, predictions):
        """Return {'accuracy': fraction of predictions equivalent to their reference}."""
        n_correct = 0.0
        for pred, ref in zip(predictions, references):
            # is_equiv canonicalizes both strings (e.g. "1/2" vs "\frac{1}{2}").
            n_correct += 1.0 if math_equivalence.is_equiv(pred, ref) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
| 98 | """simple docstring"""
import math
def a_(apparent_power, power_factor):
    """Return the real (active) power given apparent power and power factor.

    Raises:
        ValueError: if `power_factor` is not a number in [-1, 1].
    """
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('power_factor must be a valid float value between -1 and 1.')
    return apparent_power * power_factor
def a_(apparent_power, power_factor):
    """Return the reactive power: apparent_power * sqrt(1 - power_factor**2).

    Raises:
        ValueError: if `power_factor` is not a number in [-1, 1].
    """
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('power_factor must be a valid float value between -1 and 1.')
    return apparent_power * math.sqrt(1 - power_factor**2)
if __name__ == "__main__":
    # Run any doctests embedded in this module as a smoke check.
    import doctest
    doctest.testmod()
| 98 | 1 |
"""simple docstring"""
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result, args):
    """Compute WER/CER for `result` (a dataset with 'target'/'prediction' columns),
    print them, and write them (plus optional per-sample logs) to text files."""
    log_outputs = args.log_outputs
    dataset_id = '_'.join(args.dataset.split('/') + [args.config, args.split])

    # load metric
    wer = load_metric('wer')
    cer = load_metric('cer')

    # compute metrics
    wer_result = wer.compute(references=result['target'], predictions=result['prediction'])
    cer_result = cer.compute(references=result['target'], predictions=result['prediction'])

    # print & log results
    result_str = f'''WER: {wer_result}\nCER: {cer_result}'''
    print(result_str)

    with open(f'''{dataset_id}_eval_results.txt''', 'w') as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f'''log_{dataset_id}_predictions.txt'''
        target_file = f'''log_{dataset_id}_targets.txt'''
        with open(pred_file, 'w') as p, open(target_file, 'w') as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f'''{i}''' + '\n')
                p.write(batch['prediction'] + '\n')
                t.write(f'''{i}''' + '\n')
                t.write(batch['target'] + '\n')

            result.map(write_to_file, with_indices=True)


# Backward-compatible alias for the original (mangled) name.
a_ = log_results
def normalize_text(text):
    """Lowercase `text`, strip punctuation ignored during training, and
    collapse newline/whitespace sequences into single spaces."""
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, '', text.lower())
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ['\n\n', '\n', ' ', ' ']
    for t in token_sequences_to_ignore:
        text = ' '.join(text.split(t))
    return text


# Backward-compatible alias for the original (mangled) name.
a_ = normalize_text
def main(args):
    """Run ASR inference over the requested dataset split and log WER/CER results."""
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio to the model's expected sampling rate
    dataset = dataset.cast_column('audio', Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline('automatic-speech-recognition', model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch['audio']['array'], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s)
        batch['prediction'] = prediction['text']
        batch['target'] = normalize_text(batch['sentence'])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)


# Backward-compatible alias for the original (mangled) name.
a_ = main
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers'
    )
    parser.add_argument(
        '--dataset',
        type=str,
        required=True,
        help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets',
    )
    parser.add_argument(
        '--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice'
    )
    parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`')
    parser.add_argument(
        '--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to 5 seconds.'
    )
    parser.add_argument(
        '--stride_length_s', type=float, default=None, help='Stride of the audio chunks. Defaults to 1 second.'
    )
    parser.add_argument(
        '--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.'
    )
    parser.add_argument(
        '--device',
        type=int,
        default=None,
        help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.',
    )
    args = parser.parse_args()
    main(args)
| 98 | """simple docstring"""
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
# Direct key renames between SD and HF Diffusers UNet checkpoints.
unet_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ('time_embed.0.weight', 'time_embedding.linear_1.weight'),
    ('time_embed.0.bias', 'time_embedding.linear_1.bias'),
    ('time_embed.2.weight', 'time_embedding.linear_2.weight'),
    ('time_embed.2.bias', 'time_embedding.linear_2.bias'),
    ('input_blocks.0.0.weight', 'conv_in.weight'),
    ('input_blocks.0.0.bias', 'conv_in.bias'),
    ('out.0.weight', 'conv_norm_out.weight'),
    ('out.0.bias', 'conv_norm_out.bias'),
    ('out.2.weight', 'conv_out.weight'),
    ('out.2.bias', 'conv_out.bias'),
]

# Substring renames applied only to resnet keys.
unet_conversion_map_resnet = [
    # (stable-diffusion, HF Diffusers)
    ('in_layers.0', 'norm1'),
    ('in_layers.2', 'conv1'),
    ('out_layers.0', 'norm2'),
    ('out_layers.3', 'conv2'),
    ('emb_layers.1', 'time_emb_proj'),
    ('skip_connection', 'conv_shortcut'),
]

unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"""down_blocks.{i}.resnets.{j}."""
        sd_down_res_prefix = f"""input_blocks.{3*i + j + 1}.0."""
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"""down_blocks.{i}.attentions.{j}."""
            sd_down_atn_prefix = f"""input_blocks.{3*i + j + 1}.1."""
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"""up_blocks.{i}.resnets.{j}."""
        sd_up_res_prefix = f"""output_blocks.{3*i + j}.0."""
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"""up_blocks.{i}.attentions.{j}."""
            sd_up_atn_prefix = f"""output_blocks.{3*i + j}.1."""
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"""down_blocks.{i}.downsamplers.0.conv."""
        sd_downsample_prefix = f"""input_blocks.{3*(i+1)}.0.op."""
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
        # no upsample in up_blocks.3
        hf_upsample_prefix = f"""up_blocks.{i}.upsamplers.0."""
        sd_upsample_prefix = f"""output_blocks.{3*i + 2}.{1 if i == 0 else 2}."""
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
hf_mid_atn_prefix = 'mid_block.attentions.0.'
sd_mid_atn_prefix = 'middle_block.1.'
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
    hf_mid_res_prefix = f"""mid_block.resnets.{j}."""
    sd_mid_res_prefix = f"""middle_block.{2*j}."""
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
    """Rename HF Diffusers UNet state-dict keys to stable-diffusion layout.

    Relies on the module-level `unet_conversion_map*` tables.
    """
    # buyer beware: this is a *brittle* function,
    # and correct output requires that all of these pieces interact in
    # the exact order in which I have arranged them.
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict


# Backward-compatible alias for the original (mangled) name.
a_ = convert_unet_state_dict
# ================#
# VAE Conversion #
# ================#
# Key/substring renames between SD and HF Diffusers VAE checkpoints.
vae_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ('nin_shortcut', 'conv_shortcut'),
    ('norm_out', 'conv_norm_out'),
    ('mid.attn_1.', 'mid_block.attentions.0.'),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"""encoder.down_blocks.{i}.resnets.{j}."""
        sd_down_prefix = f"""encoder.down.{i}.block.{j}."""
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
    if i < 3:
        hf_downsample_prefix = f"""down_blocks.{i}.downsamplers.0."""
        sd_downsample_prefix = f"""down.{i}.downsample."""
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
        hf_upsample_prefix = f"""up_blocks.{i}.upsamplers.0."""
        sd_upsample_prefix = f"""up.{3-i}.upsample."""
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"""decoder.up_blocks.{i}.resnets.{j}."""
        sd_up_prefix = f"""decoder.up.{3-i}.block.{j}."""
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"""mid_block.resnets.{i}."""
    sd_mid_res_prefix = f"""mid.block_{i+1}."""
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))

# Substring renames applied only to attention keys.
vae_conversion_map_attn = [
    # (stable-diffusion, HF Diffusers)
    ('norm.', 'group_norm.'),
    ('q.', 'query.'),
    ('k.', 'key.'),
    ('v.', 'value.'),
    ('proj_out.', 'proj_attn.'),
]
def reshape_weight_for_sd(w):
    """Convert an HF linear weight to an SD conv2d weight by appending two singleton dims."""
    return w.reshape(*w.shape, 1, 1)


# Backward-compatible alias for the original (mangled) name.
a_ = reshape_weight_for_sd
def convert_vae_state_dict(vae_state_dict):
    """Rename HF Diffusers VAE state-dict keys to stable-diffusion layout and
    reshape the mid-block attention q/k/v/proj_out weights to conv2d shape."""
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ['q', 'k', 'v', 'proj_out']
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f'''mid.attn_1.{weight_name}.weight''' in k:
                print(f'''Reshaping {k} for SD format''')
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict


# Backward-compatible alias for the original (mangled) name.
a_ = convert_vae_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
# Substring renames between SD (OpenCLIP) and HF text-encoder checkpoints.
textenc_conversion_lst = [
    # (stable-diffusion, HF Diffusers)
    ('resblocks.', 'text_model.encoder.layers.'),
    ('ln_1', 'layer_norm1'),
    ('ln_2', 'layer_norm2'),
    ('.c_fc.', '.fc1.'),
    ('.c_proj.', '.fc2.'),
    ('.attn', '.self_attn'),
    ('ln_final.', 'transformer.text_model.final_layer_norm.'),
    ('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'),
    ('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight'),
]
# Escaped-HF-name -> SD-name lookup, plus a single regex matching any HF name.
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile('|'.join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {'q': 0, 'k': 1, 'v': 2}
def convert_text_enc_state_dict_vaa(text_enc_dict):
    """Convert an HF v2.x (OpenCLIP) text-encoder state dict to SD format,
    re-fusing separate q/k/v projections into single in_proj tensors."""
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith('.self_attn.q_proj.weight')
            or k.endswith('.self_attn.k_proj.weight')
            or k.endswith('.self_attn.v_proj.weight')
        ):
            k_pre = k[: -len('.q_proj.weight')]
            # k[-13] is the 'q'/'k'/'v' character of the projection name.
            k_code = k[-len('q_proj.weight')]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue
        if (
            k.endswith('.self_attn.q_proj.bias')
            or k.endswith('.self_attn.k_proj.bias')
            or k.endswith('.self_attn.v_proj.bias')
        ):
            k_pre = k[: -len('.q_proj.bias')]
            k_code = k[-len('q_proj.bias')]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v
    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing')
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + '.in_proj_weight'] = torch.cat(tensors)
    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing')
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + '.in_proj_bias'] = torch.cat(tensors)
    return new_state_dict


# Backward-compatible alias for the original (mangled) name.
a_ = convert_text_enc_state_dict_vaa
def convert_text_enc_state_dict(text_enc_dict):
    """v1.x text encoders need no key conversion; return the dict unchanged."""
    return text_enc_dict


# Backward-compatible alias for the original (mangled) name.
a_ = convert_text_enc_state_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
    parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
    parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
    parser.add_argument(
        '--use_safetensors', action='store_true', help='Save weights use safetensors, default is ckpt.'
    )
    args = parser.parse_args()

    assert args.model_path is not None, "Must provide a model path!"
    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"

    # Path for safetensors
    unet_path = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.safetensors')
    vae_path = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.safetensors')
    text_enc_path = osp.join(args.model_path, 'text_encoder', 'model.safetensors')

    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device='cpu')
    else:
        unet_path = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.bin')
        unet_state_dict = torch.load(unet_path, map_location='cpu')

    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device='cpu')
    else:
        vae_path = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.bin')
        vae_state_dict = torch.load(vae_path, map_location='cpu')

    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device='cpu')
    else:
        text_enc_path = osp.join(args.model_path, 'text_encoder', 'pytorch_model.bin')
        text_enc_dict = torch.load(text_enc_path, map_location='cpu')

    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {'model.diffusion_model.' + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {'first_stage_model.' + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_vaa_model = 'text_model.encoder.layers.22.layer_norm2.bias' in text_enc_dict

    if is_vaa_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {'transformer.' + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_vaa(text_enc_dict)
        text_enc_dict = {'cond_stage_model.model.' + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {'cond_stage_model.transformer.' + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {'state_dict': state_dict}
        torch.save(state_dict, args.checkpoint_path)
| 98 | 1 |
"""simple docstring"""
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase__ : Optional[int] = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
class snake_case(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for BartphoTokenizer using a tiny fixture vocabulary."""

    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # Tiny monolingual vocab written to a temp file; ids start at the
        # tokenizer's reserved offset, so 'est' -> 8 below.
        vocab = ['▁This', '▁is', '▁a', '▁t', 'est']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['monolingual_vocab_file'])
        with open(self.monolingual_vocab_file, 'w', encoding='utf-8') as fp:
            for token in vocab_tokens:
                fp.write(f'''{token} {vocab_tokens[token]}\n''')

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'This is a là test'
        output_text = 'This is a<unk><unk> test'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = 'This is a là test'
        bpe_tokens = '▁This ▁is ▁a ▁l à ▁t est'.split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 98 | """simple docstring"""
def a_(a, b):
    """Return the bitwise AND of two non-negative ints as a binary string '0b...'.

    Raises:
        ValueError: if either input is negative.
    """
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive')
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    # Zero-pad to equal width, then AND character by character.
    return "0b" + "".join(
        str(int(char_a == '1' and char_b == '1'))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len)))
if __name__ == "__main__":
    # Run any doctests embedded in this module as a smoke check.
    import doctest
    doctest.testmod()
| 98 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class snake_case(metaclass=DummyObject):
    """Placeholder object that raises a helpful error when `note_seq` is not installed."""

    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['note_seq'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['note_seq'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['note_seq'])
| 98 | """simple docstring"""
import requests
from bsa import BeautifulSoup
def get_citation(base_url, params):
    """Scrape a Google Scholar lookup page and return its 'Cited by N' link text."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, 'html.parser')
    div = soup.find('div', attrs={'class': 'gs_ri'})
    # The third anchor in the footer row is the "Cited by ..." link.
    anchors = div.find('div', attrs={'class': 'gs_fl'}).find_all('a')
    return anchors[2].get_text()


# Backward-compatible alias for the original (mangled) name.
a_ = get_citation
if __name__ == "__main__":
    # Example article lookup taken from the original script.
    params = {
        'title': (
            'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
            'capacitance, volumetric capacitance, and energy density'
        ),
        'journal': 'Chem. Mater.',
        'volume': 30,
        'pages': '3979-3990',
        'year': 2_018,
        'hl': 'en',
    }
    print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
| 98 | 1 |
"""simple docstring"""
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

# Number of mel-spectrogram frames generated per segment by the pipeline below.
TARGET_FEATURE_LENGTH = 256
class snake_case(DiffusionPipeline):
    """
    Spectrogram-diffusion pipeline: encodes note tokens and previous-segment
    context, denoises a mel spectrogram with a film decoder, and optionally
    decodes it to audio with MelGAN.
    """

    # MelGAN is only required for "numpy" (audio) output, so it is optional.
    _optional_components = ["melgan"]

    def __init__(
        self,
        notes_encoder: SpectrogramNotesEncoder,
        continuous_encoder: SpectrogramContEncoder,
        decoder: TaFilmDecoder,
        scheduler: DDPMScheduler,
        melgan: OnnxRuntimeModel if is_onnx_available() else Any,
    ):
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128

        self.register_modules(
            notes_encoder=notes_encoder,
            continuous_encoder=continuous_encoder,
            decoder=decoder,
            scheduler=scheduler,
            melgan=melgan,
        )

    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        """Linearly map features from [min_value, max_value] into `output_range`."""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        """Inverse of `scale_features`: map outputs back to [min_value, max_value]."""
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value

    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        """Encode note tokens and continuous (previous-segment) inputs with their masks."""
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask
        )
        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask
        )
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def decode(self, encodings_and_masks, input_tokens, noise_time):
        """Run the film decoder for one denoising step at normalized time `noise_time`."""
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)

        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps
        )
        return logits

    @torch.no_grad()
    def __call__(
        self,
        input_tokens: List[List[int]],
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 100,
        return_dict: bool = True,
        output_type: str = "numpy",
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ):
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
                f''' {type(callback_steps)}.''')

        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)

        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype)
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones

            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True)

            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device),
                continuous_inputs=encoder_continuous_inputs,
                continuous_mask=encoder_continuous_mask,
            )

            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape,
                generator=generator,
                device=self.device,
                dtype=self.decoder.dtype,
            )

            # set step values
            self.scheduler.set_timesteps(num_inference_steps)

            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks,
                    input_tokens=x,
                    noise_time=t / self.scheduler.config.num_train_timesteps,
                )

                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample

            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()

            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)

            logger.info('Generated segment', i)

        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                'Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.')
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                'Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.')

        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel

        if not return_dict:
            return (output,)

        return AudioPipelineOutput(audios=output)
| 98 | """simple docstring"""
def is_palindrome(num: int) -> bool:
    """Return True if ``num`` reads the same forwards and backwards."""
    return str(num) == str(num)[::-1]


def sum_reverse(num: int) -> int:
    """Return ``num`` plus the number formed by reversing its decimal digits."""
    return int(num) + int(str(num)[::-1])


def solution(limit: int = 1_0_0_0_0) -> int:
    """Project Euler 55: count Lychrel candidate numbers below ``limit``.

    A number counts as a Lychrel candidate when 50 reverse-and-add
    iterations never produce a palindrome.
    """
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 5_0:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            # while-else: the loop exhausted 50 iterations without breaking,
            # i.e. no palindrome was reached.
            lychrel_nums.append(num)
    return len(lychrel_nums)
if __name__ == "__main__":
    # Print the evaluated expression, e.g. "solution() = 249", using the
    # f-string '=' debug specifier.
    print(F"""{solution() = }""")
| 98 | 1 |
"""simple docstring"""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
# Content and relative name of the sample file used by the cache tests below.
FILE_CONTENT = '\\n Text data.\n Second line of data.'
FILE_PATH = 'file'


@pytest.fixture(scope='session' )
def zstd_path(tmp_path_factory):
    """Session-scoped fixture: path to a zstd-compressed file holding FILE_CONTENT."""
    path = tmp_path_factory.mktemp('data' ) / (FILE_PATH + '.zstd')
    data = bytes(FILE_CONTENT , 'utf-8' )
    with zstd.open(path , 'wb' ) as f:
        f.write(data )
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    """Create FILE_PATH with FILE_CONTENT inside the mock tmp:// filesystem."""
    with open(os.path.join(tmpfs.local_root_dir , FILE_PATH ) , 'w' ) as f:
        f.write(FILE_CONTENT )
    return FILE_PATH
@pytest.mark.parametrize('compression_format' , ['gzip', 'xz', 'zstd'] )
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    """cached_path with extract_compressed_file=True recovers the original text."""
    input_paths = {'gzip': gz_file, 'xz': xz_file, 'zstd': zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / 'cache'
    download_config = DownloadConfig(cache_dir=cache_dir , extract_compressed_file=True )
    extracted_path = cached_path(input_path , download_config=download_config )
    with open(extracted_path ) as f:
        extracted_file_content = f.read()
    with open(text_file ) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('default_extracted' , [True, False] )
@pytest.mark.parametrize('default_cache_dir' , [True, False] )
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    """The extraction target dir honours datasets.config overrides and custom cache dirs."""
    custom_cache_dir = 'custom_cache'
    custom_extracted_dir = 'custom_extracted_dir'
    custom_extracted_path = tmp_path / 'custom_extracted_path'
    if default_extracted:
        expected = ('downloads' if default_cache_dir else custom_cache_dir, 'extracted')
    else:
        monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_DIR' , custom_extracted_dir )
        monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(custom_extracted_path ) )
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True )
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=True )
    )
    extracted_file_path = cached_path(filename , download_config=download_config )
    assert Path(extracted_file_path ).parent.parts[-2:] == expected
def test_cached_path_local(text_file):
    """cached_path returns local files unchanged, for absolute and relative paths."""
    # absolute path
    text_file = str(Path(text_file ).resolve() )
    assert cached_path(text_file ) == text_file
    # relative path
    text_file = str(Path(text_file ).resolve().relative_to(Path(os.getcwd() ) ) )
    assert cached_path(text_file ) == text_file
def test_cached_path_missing_local(tmp_path):
    """cached_path raises FileNotFoundError on missing local files."""
    # absolute path
    missing_file = str(tmp_path.resolve() / '__missing_file__.txt' )
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
    # relative path
    missing_file = './__missing_file__.txt'
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
def test_get_from_cache_fsspec(tmpfs_file):
    """get_from_cache can fetch from an fsspec URL (tmp://)."""
    output_path = get_from_cache(f'''tmp://{tmpfs_file}''' )
    with open(output_path ) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch('datasets.config.HF_DATASETS_OFFLINE' , True )
def test_cached_path_offline():
    """HTTP cached_path is rejected while offline mode is forced on."""
    with pytest.raises(OfflineModeIsEnabled ):
        cached_path('https://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , True )
def test_http_offline(tmp_path_factory):
    """http_get / http_head are rejected while offline mode is forced on."""
    filename = tmp_path_factory.mktemp('data' ) / 'file.html'
    with pytest.raises(OfflineModeIsEnabled ):
        http_get('https://huggingface.co' , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        http_head('https://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , True )
def test_ftp_offline(tmp_path_factory):
    """ftp_get / ftp_head are rejected while offline mode is forced on."""
    filename = tmp_path_factory.mktemp('data' ) / 'file.html'
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_get('ftp://huggingface.co' , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_head('ftp://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , True )
def test_fsspec_offline(tmp_path_factory):
    """fsspec_get / fsspec_head are rejected while offline mode is forced on."""
    filename = tmp_path_factory.mktemp('data' ) / 'file.html'
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_get('s3://huggingface.co' , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_head('s3://huggingface.co' )
| 98 | """simple docstring"""
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy-import structure: maps each submodule to the public names it exports.
_import_structure = {
    'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
    'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
    'processing_mctct': ['MCTCTProcessor'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: simply do not export the modeling submodule.
    pass
else:
    _import_structure['modeling_mctct'] = [
        'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MCTCTForCTC',
        'MCTCTModel',
        'MCTCTPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports run on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available


# Lazy-import structure: maps each submodule to the public names it exports.
_import_structure = {
    'configuration_audio_spectrogram_transformer': [
        'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'ASTConfig',
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_audio_spectrogram_transformer'] = [
        'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ASTForAudioClassification',
        'ASTModel',
        'ASTPreTrainedModel',
    ]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_audio_spectrogram_transformer'] = ['ASTFeatureExtractor']

if TYPE_CHECKING:
    from .configuration_audio_spectrogram_transformer import (
        AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ASTConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_audio_spectrogram_transformer import (
            AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ASTForAudioClassification,
            ASTModel,
            ASTPreTrainedModel,
        )

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports run on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98 | """simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
    from PIL import Image
else:
    class Image:
        """Minimal stand-in so module-level references to ``Image`` don't fail
        when the vision dependencies are absent."""

        @staticmethod
        def open(*args, **kwargs):
            # NOTE(review): mirrors PIL.Image.open's name only; intentionally a no-op.
            pass
def hashimage(image):
    """Return the first 10 hex digits of the MD5 of the image's raw bytes.

    Used to shorten huge mask outputs into comparable fingerprints.
    """
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()[:1_0]
def mask_to_test_readable(mask):
    """Summarise a mask image as a short dict: MD5-prefix hash plus array shape."""
    npimg = np.array(mask )
    shape = npimg.shape
    return {"hash": hashimage(mask ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class snake_case ( unittest.TestCase ):
    """Tests for the SAM-based mask-generation pipeline."""

    # Model mappings consumed by the shared pipeline-test machinery.
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )

    def get_test_pipeline(self, model, tokenizer, processor):
        """Build a MaskGenerationPipeline and sample inputs for the generic runner."""
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor )
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        # Generic run covered by the slow tests below.
        pass

    @require_tf
    @unittest.skip('Image segmentation not implemented in TF' )
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        image_segmenter = pipeline('mask-generation' , model='facebook/sam-vit-huge' )
        outputs = image_segmenter('http://images.cocodataset.org/val2017/000000039769.jpg' , points_per_batch=256 )
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs['masks'] ):
            new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(new_output , decimals=4 ) , [
                {'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_4_4_4},
                {'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_2_1},
                {'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_1_6_7},
                {'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_1_3_2},
                {'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_0_5_3},
                {'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.9_9_6_7},
                {'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.9_9_3},
                {'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.9_9_0_9},
                {'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.9_8_7_9},
                {'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.9_8_3_4},
                {'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.9_7_1_6},
                {'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.9_6_1_2},
                {'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.9_5_9_9},
                {'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.9_5_5_2},
                {'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.9_5_3_2},
                {'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.9_5_1_6},
                {'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.9_4_9_9},
                {'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.9_4_8_3},
                {'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.9_4_6_4},
                {'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.9_4_3},
                {'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.9_4_3},
                {'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.9_4_0_8},
                {'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.9_3_3_5},
                {'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.9_3_2_6},
                {'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.9_2_6_2},
                {'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.8_9_9_9},
                {'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.8_9_8_6},
                {'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.8_9_8_4},
                {'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.8_8_7_3},
                {'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.8_8_7_1}
            ] ,)
        # fmt: on

    @require_torch
    @slow
    def test_threshold(self):
        model_id = 'facebook/sam-vit-huge'
        image_segmenter = pipeline('mask-generation' , model=model_id )
        outputs = image_segmenter(
            'http://images.cocodataset.org/val2017/000000039769.jpg' , pred_iou_thresh=1 , points_per_batch=256 )
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs['masks'] ):
            new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(new_output , decimals=4 ) , [
                {'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_4_4_4},
                {'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_2_1_0},
                {'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_1_6_7},
                {'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_1_3_2},
                {'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_0_5_3},
            ] ,)
| 98 | 1 |
"""simple docstring"""
class Node:
    """A doubly linked list node holding ``data`` plus previous/next links."""

    def __init__(self, data, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self):
        return f'''{self.data}'''

    def get_data(self):
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous
class LinkedListIterator:
    """Forward iterator over a chain of nodes, yielding each node's data."""

    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value
class LinkedList:
    """A doubly linked list maintaining both head and tail pointers."""

    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data() )
            current = current.get_next()
        return " ".join(str(node ) for node in nodes )

    def __contains__(self, value):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head )

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_last_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node):
        """Make ``node`` the new head (also tail if the list was empty)."""
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head , node )

    def set_tail(self, node):
        """Append ``node`` at the tail."""
        if self.head is None:
            self.set_head(node )
        else:
            self.insert_after_node(self.tail , node )

    def insert(self, value):
        """Append ``value`` at the end of the list."""
        node = Node(value )
        if self.head is None:
            self.set_head(node )
        else:
            self.set_tail(node )

    def insert_before_node(self, node, node_to_insert):
        node_to_insert.next = node
        node_to_insert.previous = node.previous
        if node.get_previous() is None:
            # Inserting before the head.
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert
        node.previous = node_to_insert

    def insert_after_node(self, node, node_to_insert):
        node_to_insert.previous = node
        node_to_insert.next = node.next
        if node.get_next() is None:
            # Inserting after the tail.
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert
        node.next = node_to_insert

    def insert_at_position(self, position, value):
        """Insert ``value`` at 1-based ``position``; append when past the end."""
        current_position = 1
        new_node = Node(value )
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node , new_node )
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail , new_node )

    def get_node(self, item):
        """Return the first node holding ``item``; raise if absent."""
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception('Node not found' )

    def delete_value(self, value):
        if (node := self.get_node(value )) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node )

    @staticmethod
    def remove_node_pointers(node):
        """Unlink ``node`` from its neighbours and clear its own links."""
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.previous = None
        node.next = None

    def is_empty(self):
        return self.head is None
def a_():
    """Placeholder demo hook; intentionally does nothing."""
    return None
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 98 | """simple docstring"""
import functools
def a_(worda: str, wordb: str) -> int:
    """Return the Levenshtein edit distance between ``worda`` and ``wordb``.

    Top-down dynamic programming memoised with functools.cache.
    """
    len_worda = len(worda )
    len_wordb = len(wordb )

    @functools.cache
    def min_distance(indexa: int, indexb: int) -> int:
        # if first word index is overflow - delete all from the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if second word index is overflow - delete all from the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb] )  # current letters not identical
        return min(
            1 + min_distance(indexa + 1 , indexb ),
            1 + min_distance(indexa , indexb + 1 ),
            diff + min_distance(indexa + 1 , indexb + 1 ),
        )

    return min_distance(0 , 0 )
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 98 | 1 |
"""simple docstring"""
def odd_even_sort(input_list: list) -> list:
    """Sort ``input_list`` in place using odd-even (brick) sort and return it."""
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0 , len(input_list ) - 1 , 2 ):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False

        for i in range(1 , len(input_list ) - 1 , 2 ):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list
if __name__ == "__main__":
print('Enter list to be sorted')
lowerCAmelCase__ : Dict = [int(x) for x in input().split()]
# inputing elements of the list in one line
lowerCAmelCase__ : List[Any] = odd_even_sort(input_list)
print('The sorted list is')
print(sorted_list)
| 98 | """simple docstring"""
from PIL import Image
def change_contrast(img, level: int):
    """Return ``img`` with its contrast adjusted by ``level`` (roughly -255..255).

    Uses the standard contrast-correction factor and maps every channel
    value around the 128 midpoint via ``Image.point``.
    """
    factor = (2_5_9 * (level + 2_5_5)) / (2_5_5 * (2_5_9 - level))

    def contrast(c: int) -> int:
        return int(1_2_8 + factor * (c - 1_2_8) )

    return img.point(contrast )
if __name__ == "__main__":
    # Load image
    with Image.open('image_data/lena.jpg') as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save('image_data/lena_high_contrast.png', format='png')
| 98 | 1 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class snake_case ( DiffusionPipeline ):
    """Unconditional image generation with the score-based SDE-VE sampler.

    Attributes:
        unet: U-Net that estimates the score of the noisy sample.
        scheduler: ScoreSdeVeScheduler driving the predictor/corrector loop.
    """

    unet: UNetaDModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNetaDModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2_000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        """Run the predictor-corrector sampling loop and return generated images."""
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape , generator=generator ) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device )

        self.scheduler.set_timesteps(num_inference_steps )
        self.scheduler.set_sigmas(num_inference_steps )

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )

            # correction step
            for _ in range(self.scheduler.config.correct_steps ):
                model_output = self.unet(sample , sigma_t ).sample
                sample = self.scheduler.step_correct(model_output , sample , generator=generator ).prev_sample

            # prediction step
            model_output = model(sample , sigma_t ).sample
            output = self.scheduler.step_pred(model_output , t , sample , generator=generator )

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        # Use the denoised mean for the final image; convert to HWC numpy in [0, 1].
        sample = sample_mean.clamp(0 , 1 )
        sample = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample )

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample )
| 98 | """simple docstring"""
import os
import sys
import unittest
lowerCAmelCase__ : Tuple = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
BERT_TEST_FILE = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py')
BLIP_TEST_FILE = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py')


class snake_case ( unittest.TestCase ):
    """Checks the test/tester discovery helpers in utils/get_test_info.py
    against the BERT and BLIP test files."""

    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE )
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE )
        EXPECTED_BERT_MAPPING = {'BertModelTest': 'BertModelTester'}
        EXPECTED_BLIP_MAPPING = {
            'BlipModelTest': 'BlipModelTester',
            'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
            'BlipTextModelTest': 'BlipTextModelTester',
            'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
            'BlipVQAModelTest': 'BlipVQAModelTester',
            'BlipVisionModelTest': 'BlipVisionModelTester',
        }
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping ) , EXPECTED_BERT_MAPPING )
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping ) , EXPECTED_BLIP_MAPPING )

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE )
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE )
        EXPECTED_BERT_MAPPING = {
            'BertForMaskedLM': ['BertModelTest'],
            'BertForMultipleChoice': ['BertModelTest'],
            'BertForNextSentencePrediction': ['BertModelTest'],
            'BertForPreTraining': ['BertModelTest'],
            'BertForQuestionAnswering': ['BertModelTest'],
            'BertForSequenceClassification': ['BertModelTest'],
            'BertForTokenClassification': ['BertModelTest'],
            'BertLMHeadModel': ['BertModelTest'],
            'BertModel': ['BertModelTest'],
        }
        EXPECTED_BLIP_MAPPING = {
            'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
            'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
            'BlipForQuestionAnswering': ['BlipVQAModelTest'],
            'BlipModel': ['BlipModelTest'],
            'BlipTextModel': ['BlipTextModelTest'],
            'BlipVisionModel': ['BlipVisionModelTest'],
        }
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping ) , EXPECTED_BERT_MAPPING )
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping ) , EXPECTED_BLIP_MAPPING )

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE )
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE )
        EXPECTED_BERT_MAPPING = {
            'BertForMaskedLM': ['BertModelTester'],
            'BertForMultipleChoice': ['BertModelTester'],
            'BertForNextSentencePrediction': ['BertModelTester'],
            'BertForPreTraining': ['BertModelTester'],
            'BertForQuestionAnswering': ['BertModelTester'],
            'BertForSequenceClassification': ['BertModelTester'],
            'BertForTokenClassification': ['BertModelTester'],
            'BertLMHeadModel': ['BertModelTester'],
            'BertModel': ['BertModelTester'],
        }
        EXPECTED_BLIP_MAPPING = {
            'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
            'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
            'BlipForQuestionAnswering': ['BlipVQAModelTester'],
            'BlipModel': ['BlipModelTester'],
            'BlipTextModel': ['BlipTextModelTester'],
            'BlipVisionModel': ['BlipVisionModelTester'],
        }
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping ) , EXPECTED_BERT_MAPPING )
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping ) , EXPECTED_BLIP_MAPPING )
| 98 | 1 |
"""simple docstring"""
def euclidean_gcd(a: int, b: int) -> int:
    """Return the greatest common divisor of ``a`` and ``b`` (iterative Euclid)."""
    while b:
        a, b = b, a % b
    return a
def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Return the greatest common divisor of ``a`` and ``b`` (recursive Euclid)."""
    return a if b == 0 else euclidean_gcd_recursive(b , a % b )
def main():
    """Demonstrate both GCD implementations on a few sample pairs."""
    print(f'''euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}''' )
    print(f'''euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}''' )
    print(f'''euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}''' )
    print(f'''euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}''' )
    print(f'''euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}''' )
    print(f'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}''' )
    print(f'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}''' )
    print(f'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}''' )
    print(f'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}''' )
    print(f'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}''' )
if __name__ == "__main__":
    # Demo entry point: print both GCD implementations' results.
    main()
| 98 | """simple docstring"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Lazy-import structure: maps each submodule to the public names it exports.
_import_structure = {
    'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
    'tokenization_roc_bert': ['RoCBertTokenizer'],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): no fast tokenizer is registered for RoCBert; placeholder branch.
    pass

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_roc_bert'] = [
        'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'RoCBertForCausalLM',
        'RoCBertForMaskedLM',
        'RoCBertForMultipleChoice',
        'RoCBertForPreTraining',
        'RoCBertForQuestionAnswering',
        'RoCBertForSequenceClassification',
        'RoCBertForTokenClassification',
        'RoCBertLayer',
        'RoCBertModel',
        'RoCBertPreTrainedModel',
        'load_tf_weights_in_roc_bert',
    ]

if TYPE_CHECKING:
    from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
    from .tokenization_roc_bert import RoCBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        # NOTE(review): TYPE_CHECKING blocks never execute at runtime; this mirrors
        # the fast-tokenizer placeholder above — confirm against upstream intent.
        raise OptionalDependencyNotAvailable()

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roc_bert import (
            ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoCBertForCausalLM,
            RoCBertForMaskedLM,
            RoCBertForMultipleChoice,
            RoCBertForPreTraining,
            RoCBertForQuestionAnswering,
            RoCBertForSequenceClassification,
            RoCBertForTokenClassification,
            RoCBertLayer,
            RoCBertModel,
            RoCBertPreTrainedModel,
            load_tf_weights_in_roc_bert,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports run on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98 | 1 |
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
lowerCAmelCase__ : Tuple = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
lowerCAmelCase__ : int = subprocess.check_output(F"""git diff --name-only {fork_point_sha}""".split()).decode('utf-8').split()
lowerCAmelCase__ : Any = '|'.join(sys.argv[1:])
lowerCAmelCase__ : List[Any] = re.compile(rF"""^({joined_dirs}).*?\.py$""")
lowerCAmelCase__ : Optional[Any] = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
| 98 | """simple docstring"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


# Lazy-import structure: maps each submodule to the public names it exports.
_import_structure = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_vit_mae'] = [
        'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ViTMAEForPreTraining',
        'ViTMAELayer',
        'ViTMAEModel',
        'ViTMAEPreTrainedModel',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_vit_mae'] = [
        'TFViTMAEForPreTraining',
        'TFViTMAEModel',
        'TFViTMAEPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_mae import (
            VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMAEForPreTraining,
            ViTMAELayer,
            ViTMAEModel,
            ViTMAEPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports run on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98 | 1 |
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
lowerCAmelCase__ : Dict = pytest.mark.integration
@require_faiss
class snake_case ( __UpperCAmelCase ):
    """Tests for attaching FAISS / Elasticsearch indexes to a `Dataset`.

    NOTE(review): identifiers are machine-obfuscated. Results are bound to
    `UpperCAmelCase__` while later statements read the original names
    (`dset`, `examples`, …), and helper methods such as
    `_create_dummy_dataset` no longer exist under that name — this class
    would raise NameError/AttributeError if executed; confirm against the
    un-obfuscated upstream test module. The base class `__UpperCAmelCase`
    is presumably `TestCase` — confirm upstream.
    """
    # Build a 30-row dummy dataset with predictable 'filename' values.
    def __lowerCAmelCase ( self : Union[str, Any] ):
        UpperCAmelCase__ = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(lowerCamelCase__ ) for x in np.arange(30 ).tolist()]} )
        return dset
    # Add a FAISS index over mapped vectors and query the nearest example.
    def __lowerCAmelCase ( self : List[Any] ):
        import faiss
        UpperCAmelCase__ = self._create_dummy_dataset()
        UpperCAmelCase__ = dset.map(
            lambda lowerCamelCase__ ,lowerCamelCase__ : {"vecs": i * np.ones(5 ,dtype=np.floataa )} ,with_indices=lowerCamelCase__ ,keep_in_memory=lowerCamelCase__ )
        UpperCAmelCase__ = dset.add_faiss_index('vecs' ,batch_size=100 ,metric_type=faiss.METRIC_INNER_PRODUCT )
        UpperCAmelCase__ , UpperCAmelCase__ = dset.get_nearest_examples('vecs' ,np.ones(5 ,dtype=np.floataa ) )
        self.assertEqual(examples['filename'][0] ,'my_name-train_29' )
        dset.drop_index('vecs' )
    # Same, but the vectors come from an external numpy array.
    def __lowerCAmelCase ( self : List[str] ):
        import faiss
        UpperCAmelCase__ = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 ,1 ) ,index_name='vecs' ,batch_size=100 ,metric_type=faiss.METRIC_INNER_PRODUCT ,)
        UpperCAmelCase__ , UpperCAmelCase__ = dset.get_nearest_examples('vecs' ,np.ones(5 ,dtype=np.floataa ) )
        self.assertEqual(examples['filename'][0] ,'my_name-train_29' )
    # Round-trip a FAISS index through save/load on disk.
    def __lowerCAmelCase ( self : List[Any] ):
        import faiss
        UpperCAmelCase__ = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 ,1 ) ,index_name='vecs' ,metric_type=faiss.METRIC_INNER_PRODUCT ,)
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=lowerCamelCase__ ) as tmp_file:
            dset.save_faiss_index('vecs' ,tmp_file.name )
            dset.load_faiss_index('vecs2' ,tmp_file.name )
        os.unlink(tmp_file.name )
        UpperCAmelCase__ , UpperCAmelCase__ = dset.get_nearest_examples('vecs2' ,np.ones(5 ,dtype=np.floataa ) )
        self.assertEqual(examples['filename'][0] ,'my_name-train_29' )
    # Dropping an index must make subsequent queries fail.
    def __lowerCAmelCase ( self : Optional[Any] ):
        UpperCAmelCase__ = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 ,1 ) ,index_name='vecs' )
        dset.drop_index('vecs' )
        self.assertRaises(lowerCamelCase__ ,partial(dset.get_nearest_examples ,'vecs2' ,np.ones(5 ,dtype=np.floataa ) ) )
    # Exercise the Elasticsearch index path with a fully mocked client.
    def __lowerCAmelCase ( self : str ):
        from elasticsearch import Elasticsearch
        UpperCAmelCase__ = self._create_dummy_dataset()
        with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
            'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
            UpperCAmelCase__ = {'acknowledged': True}
            # NOTE(review): this *calls* mocked_bulk.return_value instead of
            # assigning it — presumably intentional in upstream; confirm.
            mocked_bulk.return_value([(True, None)] * 30 )
            UpperCAmelCase__ = {'hits': {'hits': [{'_score': 1, '_id': 29}]}}
            UpperCAmelCase__ = Elasticsearch()
            dset.add_elasticsearch_index('filename' ,es_client=lowerCamelCase__ )
            UpperCAmelCase__ , UpperCAmelCase__ = dset.get_nearest_examples('filename' ,'my_name-train_29' )
            self.assertEqual(examples['filename'][0] ,'my_name-train_29' )
@require_faiss
class snake_case ( __UpperCAmelCase ):
    """Unit tests for the `FaissIndex` wrapper itself.

    NOTE(review): identifiers are machine-obfuscated — results are bound to
    `UpperCAmelCase__` while later statements read the original names
    (`index`, `query`, `scores`, …); the class would raise NameError if
    executed. Confirm against the un-obfuscated upstream test module.
    """
    # Add vectors then run single and batched nearest-neighbour searches.
    def __lowerCAmelCase ( self : List[Any] ):
        import faiss
        UpperCAmelCase__ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
        # add vectors
        index.add_vectors(np.eye(5 ,dtype=np.floataa ) )
        self.assertIsNotNone(index.faiss_index )
        self.assertEqual(index.faiss_index.ntotal ,5 )
        index.add_vectors(np.zeros((5, 5) ,dtype=np.floataa ) )
        self.assertEqual(index.faiss_index.ntotal ,10 )
        # single query
        UpperCAmelCase__ = np.zeros(5 ,dtype=np.floataa )
        UpperCAmelCase__ = 1
        UpperCAmelCase__ , UpperCAmelCase__ = index.search(lowerCamelCase__ )
        self.assertRaises(lowerCamelCase__ ,index.search ,query.reshape(-1 ,1 ) )
        self.assertGreater(scores[0] ,0 )
        self.assertEqual(indices[0] ,1 )
        # batched queries
        UpperCAmelCase__ = np.eye(5 ,dtype=np.floataa )[::-1]
        UpperCAmelCase__ , UpperCAmelCase__ = index.search_batch(lowerCamelCase__ )
        self.assertRaises(lowerCamelCase__ ,index.search_batch ,queries[0] )
        UpperCAmelCase__ = [scores[0] for scores in total_scores]
        UpperCAmelCase__ = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(lowerCamelCase__ ) ,0 )
        self.assertListEqual([4, 3, 2, 1, 0] ,lowerCamelCase__ )
    # Build indexes from FAISS string factories; factory + custom index is an error.
    def __lowerCAmelCase ( self : Dict ):
        import faiss
        UpperCAmelCase__ = FaissIndex(string_factory='Flat' )
        index.add_vectors(np.eye(5 ,dtype=np.floataa ) )
        self.assertIsInstance(index.faiss_index ,faiss.IndexFlat )
        UpperCAmelCase__ = FaissIndex(string_factory='LSH' )
        index.add_vectors(np.eye(5 ,dtype=np.floataa ) )
        self.assertIsInstance(index.faiss_index ,faiss.IndexLSH )
        with self.assertRaises(lowerCamelCase__ ):
            UpperCAmelCase__ = FaissIndex(string_factory='Flat' ,custom_index=faiss.IndexFlat(5 ) )
    # Wrap a pre-built FAISS index object.
    def __lowerCAmelCase ( self : str ):
        import faiss
        UpperCAmelCase__ = faiss.IndexFlat(5 )
        UpperCAmelCase__ = FaissIndex(custom_index=lowerCamelCase__ )
        index.add_vectors(np.eye(5 ,dtype=np.floataa ) )
        self.assertIsInstance(index.faiss_index ,faiss.IndexFlat )
    # Round-trip the index through save/load on disk.
    def __lowerCAmelCase ( self : List[Any] ):
        import faiss
        UpperCAmelCase__ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
        index.add_vectors(np.eye(5 ,dtype=np.floataa ) )
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=lowerCamelCase__ ) as tmp_file:
            index.save(tmp_file.name )
            UpperCAmelCase__ = FaissIndex.load(tmp_file.name )
        os.unlink(tmp_file.name )
        UpperCAmelCase__ = np.zeros(5 ,dtype=np.floataa )
        UpperCAmelCase__ = 1
        UpperCAmelCase__ , UpperCAmelCase__ = index.search(lowerCamelCase__ )
        self.assertGreater(scores[0] ,0 )
        self.assertEqual(indices[0] ,1 )
@require_faiss
def a_ ( mockfs ):
    """Round-trip a FaissIndex through a mock fsspec filesystem.

    `mockfs` is presumably a pytest fixture providing an in-memory
    filesystem registered under the `mock://` protocol — confirm against
    the test suite's conftest.

    Fix: the obfuscation had collapsed every local binding to
    `UpperCAmelCase__`, so later reads of `index`, `index_name`, `scores`
    and `indices` raised NameError; local names are restored.
    """
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
    index.add_vectors(np.eye(5 , dtype=np.floataa ) )
    index_name = 'index.faiss'
    path = f'''mock://{index_name}'''
    # Save to, then reload from, the mock filesystem.
    index.save(path , storage_options=mockfs.storage_options )
    index = FaissIndex.load(path , storage_options=mockfs.storage_options )
    query = np.zeros(5 , dtype=np.floataa )
    query[1] = 1
    scores, indices = index.search(query )
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class snake_case ( __UpperCAmelCase ):
    """Unit tests for `ElasticSearchIndex` with a fully mocked ES client.

    NOTE(review): identifiers are machine-obfuscated — results are bound to
    `UpperCAmelCase__` while later statements read the original names
    (`index`, `scores`, `indices`, …); the class would raise NameError if
    executed. Confirm against the un-obfuscated upstream test module.
    """
    # Add documents, then exercise single and batched searches, with and
    # without a request timeout.
    def __lowerCAmelCase ( self : Any ):
        from elasticsearch import Elasticsearch
        with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
            'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
            UpperCAmelCase__ = Elasticsearch()
            UpperCAmelCase__ = {'acknowledged': True}
            UpperCAmelCase__ = ElasticSearchIndex(es_client=lowerCamelCase__ )
            # NOTE(review): this *calls* mocked_bulk.return_value instead of
            # assigning it — presumably intentional in upstream; confirm.
            mocked_bulk.return_value([(True, None)] * 3 )
            index.add_documents(['foo', 'bar', 'foobar'] )
            # single query
            UpperCAmelCase__ = 'foo'
            UpperCAmelCase__ = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            UpperCAmelCase__ , UpperCAmelCase__ = index.search(lowerCamelCase__ )
            self.assertEqual(scores[0] ,1 )
            self.assertEqual(indices[0] ,0 )
            # single query with timeout
            UpperCAmelCase__ = 'foo'
            UpperCAmelCase__ = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            UpperCAmelCase__ , UpperCAmelCase__ = index.search(lowerCamelCase__ ,request_timeout=30 )
            self.assertEqual(scores[0] ,1 )
            self.assertEqual(indices[0] ,0 )
            # batched queries
            UpperCAmelCase__ = ['foo', 'bar', 'foobar']
            UpperCAmelCase__ = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            UpperCAmelCase__ , UpperCAmelCase__ = index.search_batch(lowerCamelCase__ )
            UpperCAmelCase__ = [scores[0] for scores in total_scores]
            UpperCAmelCase__ = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(lowerCamelCase__ ) ,0 )
            self.assertListEqual([1, 1, 1] ,lowerCamelCase__ )
            # batched queries with timeout
            UpperCAmelCase__ = ['foo', 'bar', 'foobar']
            UpperCAmelCase__ = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            UpperCAmelCase__ , UpperCAmelCase__ = index.search_batch(lowerCamelCase__ ,request_timeout=30 )
            UpperCAmelCase__ = [scores[0] for scores in total_scores]
            UpperCAmelCase__ = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(lowerCamelCase__ ) ,0 )
            self.assertListEqual([1, 1, 1] ,lowerCamelCase__ )
| 98 | """simple docstring"""
import os
import numpy
import onnx
def a_ ( a , b ):
    """Compare two tensor protos for equality while ignoring their names.

    Temporarily blanks both `name` fields, compares the protos, then
    restores the original names before returning.

    Fix: the obfuscated version declared two parameters with the same name
    (a SyntaxError) and dropped the `a.name = ''` / `b.name = ''` writes,
    so names were never actually ignored.
    """
    name_a = a.name
    name_b = b.name
    a.name = ''
    b.name = ''
    # Equality now covers everything except the (blanked) names.
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def a_ ( node_proto , name , new_name ):
    """Replace every input of `node_proto` equal to `name` with `new_name`.

    Recurses into the subgraphs carried by If (then/else branches) and
    Loop (body) nodes.

    Fix: the obfuscated version declared three parameters with the same
    name (a SyntaxError) and passed garbled arguments to the insert and
    recursive calls; original argument wiring is restored.
    NOTE(review): `_graph_replace_input_with` is not defined under that
    name in this module (its def was renamed to `a_`) — confirm upstream.
    """
    for i, input_name in enumerate(node_proto.input ):
        if input_name == name:
            # Insert the replacement, then drop the old entry right after it.
            node_proto.input.insert(i , new_name )
            node_proto.input.pop(i + 1 )
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
        _graph_replace_input_with(node_proto.attribute[1].g , name , new_name )
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
def a_ ( graph_proto , name , new_name ):
    """Replace input `name` with `new_name` in every node of `graph_proto`.

    Fix: the obfuscated version declared three parameters with the same
    name (a SyntaxError) and passed garbled arguments to the helper call.
    NOTE(review): `_node_replace_input_with` is not defined under that
    name in this module (its def was renamed to `a_`) — confirm upstream.
    """
    for n in graph_proto.node:
        _node_replace_input_with(n , name , new_name )
def a_ ( model , model_without_ext , ind_to_replace ):
    """Drop duplicate initializers and rewire their consumers.

    For each `(i, ref_i)` pair in `ind_to_replace`, removes initializer
    `i` from `model_without_ext` and redirects all graph inputs that
    referenced it to initializer `ref_i` instead.

    Fix: the obfuscated version declared three parameters with the same
    name (a SyntaxError) and collapsed the locals, so `inits_with_data`,
    `inits` and the replacement names were undefined; they are restored.
    NOTE(review): `_graph_replace_input_with` is not defined under that
    name in this module (its def was renamed to `a_`) — confirm upstream.
    """
    inits_with_data = list(model.graph.initializer )
    inits = list(model_without_ext.graph.initializer )
    for i, ref_i in ind_to_replace:
        # Both views must agree on names, and the kept entry must precede
        # the removed one.
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i] )
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph , name_i , name_ref )
def a_ ( model_path ):
    """Deduplicate identical initializer tensors in an ONNX model file.

    Loads the model at `model_path`, finds pairs of byte-identical
    initializers (ignoring names), removes the duplicates, rewires their
    consumers, and saves the result next to the input as
    'optimized_<name>'. Returns the path of the optimized model.

    Fix: the obfuscation collapsed every local to `UpperCAmelCase__`, so
    later reads of `inits`, `dup_set`, `dup_map`, `ind_to_replace` and
    `total_reduced_size` raised NameError, and `os.path.join` was called
    with the same argument twice; the original locals are restored.
    NOTE(review): `_is_equal_tensor_proto` and
    `_remove_dup_initializers_from_model` are not defined under those
    names in this module (their defs were renamed to `a_`) — confirm
    upstream.
    """
    model_file_folder = os.path.dirname(model_path )
    model_file_name = os.path.basename(model_path )
    model = onnx.load(os.path.join(model_file_folder , model_file_name ) )
    inits = list(model.graph.initializer )
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits ) ):
        if i in dup_set:
            continue
        for j in range(i + 1 , len(inits ) ):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i] , inits[j] ):
                dup_set.add(i )
                dup_set.add(j )
                # Estimate the bytes saved by dropping the duplicate.
                # ONNX TensorProto dtype codes: 1=FLOAT, 6=INT32 (4 bytes);
                # 7=INT64, 11=DOUBLE (8 bytes).
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims )
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 1_1:
                    mem_size *= 8
                else:
                    print('unexpected data type: ' , dtype )
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j )
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i) )
    print('total reduced size: ' , total_reduced_size / 1_0_2_4 / 1_0_2_4 / 1_0_2_4 , 'GB' )
    ind_to_replace = sorted(ind_to_replace )
    _remove_dup_initializers_from_model(model , model , ind_to_replace )
    optimized_model_file_name = 'optimized_' + model_file_name
    new_model_path = os.path.join(model_file_folder , optimized_model_file_name )
    onnx.save(model , new_model_path )
    return new_model_path
| 98 | 1 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def a_ ( ):
    """Parse the TPU launcher's command-line arguments.

    Returns an argparse.Namespace with `num_cores` (int, default 1),
    `training_script` (path of the script to launch) and
    `training_script_args` (everything after it, passed through verbatim).

    Fix: the obfuscation left `parser` unbound (the ArgumentParser was
    assigned to `UpperCAmelCase__`) and replaced `type=int`, `type=str`
    and `nargs=REMAINDER` with an undefined name; all are restored.
    """
    parser = ArgumentParser(
        description=(
            'PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'
        ) )
    # Optional arguments for the launch helper
    parser.add_argument('--num_cores' , type=int , default=1 , help='Number of TPU cores to use (1 or 8).' )
    # positional
    parser.add_argument(
        'training_script' , type=str , help=(
            'The full path to the single TPU training '
            'program/script to be launched in parallel, '
            'followed by all the arguments for the '
            'training script'
        ) , )
    # rest from the training program
    parser.add_argument('training_script_args' , nargs=REMAINDER )
    return parser.parse_args()
def a_ ( ):
    """Launch the training script on TPUs via torch_xla multiprocessing.

    Imports the target script as a module, rewrites `sys.argv` so the
    script sees its own arguments plus the injected `--tpu_num_cores`,
    and spawns `mod._mp_fn` on `num_cores` processes.

    Fix: the obfuscation collapsed the locals, so `args`, `script_fpath`
    and `mod` were undefined; they are restored.
    NOTE(review): `parse_args` is not defined under that name in this
    module (its def was renamed to `a_` and is shadowed by this one) —
    confirm the intended parser function upstream.
    """
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )]
    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
    # Fix: the guard previously called `main()`, which does not exist in
    # this module; the entry point defined above is `a_`.
    a_()
| 98 | """simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class snake_case ( __UpperCAmelCase , unittest.TestCase ):
    """Fast (CPU-sized) tests for `ShapEImgaImgPipeline`.

    NOTE(review): identifiers are machine-obfuscated. Results are bound to
    `UpperCAmelCase__` while later statements read the original names
    (`model`, `pipe`, `inputs`, …), and the dummy-component properties no
    longer exist under names like `dummy_prior` — this class would raise
    NameError/AttributeError if executed. Confirm against the
    un-obfuscated upstream test module. The mixin `__UpperCAmelCase` is
    presumably `PipelineTesterMixin`.
    """
    snake_case__ = ShapEImgaImgPipeline
    snake_case__ = ["image"]
    snake_case__ = ["image"]
    snake_case__ = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    snake_case__ = False
    # Tiny model dimensions keep these tests fast.
    @property
    def __lowerCAmelCase ( self : List[str] ):
        return 32
    @property
    def __lowerCAmelCase ( self : str ):
        return 32
    @property
    def __lowerCAmelCase ( self : int ):
        return self.time_input_dim * 4
    @property
    def __lowerCAmelCase ( self : List[Any] ):
        return 8
    # Deterministic dummy CLIP vision encoder.
    @property
    def __lowerCAmelCase ( self : Optional[int] ):
        torch.manual_seed(0 )
        UpperCAmelCase__ = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size ,image_size=64 ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=37 ,num_attention_heads=4 ,num_channels=3 ,num_hidden_layers=5 ,patch_size=1 ,)
        UpperCAmelCase__ = CLIPVisionModel(lowerCamelCase__ )
        return model
    # Matching CLIP image processor.
    @property
    def __lowerCAmelCase ( self : Optional[Any] ):
        UpperCAmelCase__ = CLIPImageProcessor(
            crop_size=224 ,do_center_crop=lowerCamelCase__ ,do_normalize=lowerCamelCase__ ,do_resize=lowerCamelCase__ ,image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] ,image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] ,resample=3 ,size=224 ,)
        return image_processor
    # Deterministic dummy prior transformer.
    @property
    def __lowerCAmelCase ( self : str ):
        torch.manual_seed(0 )
        UpperCAmelCase__ = {
            'num_attention_heads': 2,
            'attention_head_dim': 16,
            'embedding_dim': self.time_input_dim,
            'num_embeddings': 32,
            'embedding_proj_dim': self.text_embedder_hidden_size,
            'time_embed_dim': self.time_embed_dim,
            'num_layers': 1,
            'clip_embed_dim': self.time_input_dim * 2,
            'additional_embeddings': 0,
            'time_embed_act_fn': 'gelu',
            'norm_in_type': 'layer',
            'embedding_proj_norm_type': 'layer',
            'encoder_hid_proj_type': None,
            'added_emb_type': None,
        }
        UpperCAmelCase__ = PriorTransformer(**lowerCamelCase__ )
        return model
    # Deterministic dummy NeRF-style renderer.
    @property
    def __lowerCAmelCase ( self : Tuple ):
        torch.manual_seed(0 )
        UpperCAmelCase__ = {
            'param_shapes': (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            'd_latent': self.time_input_dim,
            'd_hidden': self.renderer_dim,
            'n_output': 12,
            'background': (
                0.1,
                0.1,
                0.1,
            ),
        }
        UpperCAmelCase__ = ShapERenderer(**lowerCamelCase__ )
        return model
    # Assemble all dummy components into the pipeline's constructor kwargs.
    def __lowerCAmelCase ( self : Any ):
        UpperCAmelCase__ = self.dummy_prior
        UpperCAmelCase__ = self.dummy_image_encoder
        UpperCAmelCase__ = self.dummy_image_processor
        UpperCAmelCase__ = self.dummy_renderer
        UpperCAmelCase__ = HeunDiscreteScheduler(
            beta_schedule='exp' ,num_train_timesteps=1_024 ,prediction_type='sample' ,use_karras_sigmas=lowerCamelCase__ ,clip_sample=lowerCamelCase__ ,clip_sample_range=1.0 ,)
        UpperCAmelCase__ = {
            'prior': prior,
            'image_encoder': image_encoder,
            'image_processor': image_processor,
            'renderer': renderer,
            'scheduler': scheduler,
        }
        return components
    # Build deterministic pipeline call kwargs (seeded generator + image).
    def __lowerCAmelCase ( self : Optional[int] ,lowerCamelCase__ : Any ,lowerCamelCase__ : str=0 ):
        UpperCAmelCase__ = floats_tensor((1, 3, 64, 64) ,rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
        if str(lowerCamelCase__ ).startswith('mps' ):
            UpperCAmelCase__ = torch.manual_seed(lowerCamelCase__ )
        else:
            UpperCAmelCase__ = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
        UpperCAmelCase__ = {
            'image': input_image,
            'generator': generator,
            'num_inference_steps': 1,
            'frame_size': 32,
            'output_type': 'np',
        }
        return inputs
    # End-to-end smoke test against a fixed expected slice.
    def __lowerCAmelCase ( self : Optional[int] ):
        UpperCAmelCase__ = 'cpu'
        UpperCAmelCase__ = self.get_dummy_components()
        UpperCAmelCase__ = self.pipeline_class(**lowerCamelCase__ )
        UpperCAmelCase__ = pipe.to(lowerCamelCase__ )
        pipe.set_progress_bar_config(disable=lowerCamelCase__ )
        UpperCAmelCase__ = pipe(**self.get_dummy_inputs(lowerCamelCase__ ) )
        UpperCAmelCase__ = output.images[0]
        UpperCAmelCase__ = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        UpperCAmelCase__ = np.array(
            [
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
            ] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def __lowerCAmelCase ( self : Tuple ):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2] )
    def __lowerCAmelCase ( self : Tuple ):
        UpperCAmelCase__ = torch_device == 'cpu'
        UpperCAmelCase__ = True
        self._test_inference_batch_single_identical(
            batch_size=2 ,test_max_difference=lowerCamelCase__ ,relax_max_difference=lowerCamelCase__ ,)
    # num_images_per_prompt must multiply the batch dimension of the output.
    def __lowerCAmelCase ( self : List[Any] ):
        UpperCAmelCase__ = self.get_dummy_components()
        UpperCAmelCase__ = self.pipeline_class(**lowerCamelCase__ )
        UpperCAmelCase__ = pipe.to(lowerCamelCase__ )
        pipe.set_progress_bar_config(disable=lowerCamelCase__ )
        UpperCAmelCase__ = 1
        UpperCAmelCase__ = 2
        UpperCAmelCase__ = self.get_dummy_inputs(lowerCamelCase__ )
        for key in inputs.keys():
            if key in self.batch_params:
                UpperCAmelCase__ = batch_size * [inputs[key]]
        UpperCAmelCase__ = pipe(**lowerCamelCase__ ,num_images_per_prompt=lowerCamelCase__ )[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase ):
    """Slow GPU integration test for the pretrained Shap-E img2img pipeline.

    NOTE(review): identifiers are machine-obfuscated — results are bound
    to `UpperCAmelCase__` while later statements read the original names
    (`pipe`, `images`, …); would raise NameError if executed. Confirm
    against the un-obfuscated upstream test module.
    """
    def __lowerCAmelCase ( self : int ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    # Compare generated frames against a stored reference output.
    def __lowerCAmelCase ( self : Any ):
        UpperCAmelCase__ = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png' )
        UpperCAmelCase__ = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/shap_e/test_shap_e_img2img_out.npy' )
        UpperCAmelCase__ = ShapEImgaImgPipeline.from_pretrained('openai/shap-e-img2img' )
        UpperCAmelCase__ = pipe.to(lowerCamelCase__ )
        pipe.set_progress_bar_config(disable=lowerCamelCase__ )
        UpperCAmelCase__ = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
        UpperCAmelCase__ = pipe(
            lowerCamelCase__ ,generator=lowerCamelCase__ ,guidance_scale=3.0 ,num_inference_steps=64 ,frame_size=64 ,output_type='np' ,).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(lowerCamelCase__ ,lowerCamelCase__ )
| 98 | 1 |
"""simple docstring"""
import math
def a_ ( apparent_power , power_factor ):
    """Return the real (active) power: P = S * pf.

    `apparent_power` is the apparent power S; `power_factor` must be a
    number in [-1, 1], otherwise ValueError is raised.

    Fix: the obfuscated version declared two parameters with the same
    name (a SyntaxError) and read the undefined names `power_factor` /
    `apparent_power`; the parameter names are restored to match the body.
    """
    if (
        not isinstance(power_factor , (int, float) )
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('power_factor must be a valid float value between -1 and 1.' )
    return apparent_power * power_factor
def a_ ( apparent_power , power_factor ):
    """Return the reactive power: Q = S * sqrt(1 - pf**2).

    `apparent_power` is the apparent power S; `power_factor` must be a
    number in [-1, 1], otherwise ValueError is raised.

    Fix: the obfuscated version declared two parameters with the same
    name (a SyntaxError) and read the undefined names `power_factor` /
    `apparent_power`; the parameter names are restored to match the body.
    """
    if (
        not isinstance(power_factor , (int, float) )
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('power_factor must be a valid float value between -1 and 1.' )
    return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed directly.
    import doctest
    doctest.testmod()
| 98 | """simple docstring"""
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ : Optional[int] = logging.get_logger(__name__)
class snake_case ( __UpperCAmelCase ):
    """A ByT5-style byte-level tokenizer: one token per UTF-8 byte, plus a
    small set of special tokens (pad/eos/unk) and optional sentinel
    `<extra_id_*>` tokens appended at the end of the vocabulary.

    NOTE(review): identifiers are machine-obfuscated. Locals are bound to
    `UpperCAmelCase__` while later statements read the original names
    (`extra_ids`, `pad_token`, `token_ids`, …), so most methods would
    raise NameError if executed; the base class `__UpperCAmelCase` is
    presumably `PreTrainedTokenizer`. Confirm against the un-obfuscated
    upstream module.
    """
    snake_case__ = ["input_ids", "attention_mask"]
    def __init__( self : Optional[int] ,lowerCamelCase__ : int="</s>" ,lowerCamelCase__ : str="<unk>" ,lowerCamelCase__ : Union[str, Any]="<pad>" ,lowerCamelCase__ : int=125 ,lowerCamelCase__ : str=None ,**lowerCamelCase__ : Union[str, Any] ,):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            UpperCAmelCase__ = [f'''<extra_id_{i}>''' for i in range(lowerCamelCase__ )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            UpperCAmelCase__ = len(set(filter(lambda lowerCamelCase__ : bool('extra_id' in str(lowerCamelCase__ ) ) ,lowerCamelCase__ ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
                    ' provided to ByT5Tokenizer. In this case the additional_special_tokens must include the'
                    ' extra_ids tokens' )
        # Wrap plain-string special tokens in AddedToken instances.
        UpperCAmelCase__ = AddedToken(lowerCamelCase__ ,lstrip=lowerCamelCase__ ,rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) else pad_token
        UpperCAmelCase__ = AddedToken(lowerCamelCase__ ,lstrip=lowerCamelCase__ ,rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) else eos_token
        UpperCAmelCase__ = AddedToken(lowerCamelCase__ ,lstrip=lowerCamelCase__ ,rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) else unk_token
        super().__init__(
            eos_token=lowerCamelCase__ ,unk_token=lowerCamelCase__ ,pad_token=lowerCamelCase__ ,extra_ids=lowerCamelCase__ ,additional_special_tokens=lowerCamelCase__ ,**lowerCamelCase__ ,)
        UpperCAmelCase__ = extra_ids
        UpperCAmelCase__ = 2**8 # utf is 8 bits
        # define special tokens dict
        UpperCAmelCase__ = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        UpperCAmelCase__ = len(self.special_tokens_encoder )
        UpperCAmelCase__ = len(lowerCamelCase__ )
        # Sentinel tokens get ids at the very end of the vocabulary.
        for i, token in enumerate(lowerCamelCase__ ):
            UpperCAmelCase__ = self.vocab_size + i - n
        UpperCAmelCase__ = {v: k for k, v in self.special_tokens_encoder.items()}
    # Vocabulary size: 256 byte values + special tokens + sentinels.
    @property
    def __lowerCAmelCase ( self : Union[str, Any] ):
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
    # Mask marking special-token positions (1) vs regular tokens (0).
    def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ,lowerCamelCase__ : bool = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=lowerCamelCase__ ,token_ids_a=lowerCamelCase__ ,already_has_special_tokens=lowerCamelCase__ )
        # normal case: some special tokens
        if token_ids_a is None:
            return ([0] * len(lowerCamelCase__ )) + [1]
        return ([0] * len(lowerCamelCase__ )) + [1] + ([0] * len(lowerCamelCase__ )) + [1]
    # Append EOS unless the sequence already ends with it (then warn).
    def __lowerCAmelCase ( self : str ,lowerCamelCase__ : List[int] ):
        if len(lowerCamelCase__ ) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
                ' eos tokens being added.' )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]
    # Token-type ids: all zeros for this tokenizer.
    def __lowerCAmelCase ( self : str ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ):
        UpperCAmelCase__ = [self.eos_token_id]
        if token_ids_a is None:
            return len(token_ids_a + eos ) * [0]
        return len(token_ids_a + eos + token_ids_a + eos ) * [0]
    # Build model input: seq_a</s> or seq_a</s>seq_b</s>.
    def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ):
        UpperCAmelCase__ = self._add_eos_if_not_present(lowerCamelCase__ )
        if token_ids_a is None:
            return token_ids_a
        else:
            UpperCAmelCase__ = self._add_eos_if_not_present(lowerCamelCase__ )
            return token_ids_a + token_ids_a
    # Tokenize: one single-character token per UTF-8 byte.
    def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : str ):
        UpperCAmelCase__ = [chr(lowerCamelCase__ ) for i in text.encode('utf-8' )]
        return tokens
    # Token -> id: specials first, then added tokens, then raw byte value
    # offset by the number of special tokens.
    def __lowerCAmelCase ( self : List[Any] ,lowerCamelCase__ : str ):
        if token in self.special_tokens_encoder:
            UpperCAmelCase__ = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            UpperCAmelCase__ = self.added_tokens_encoder[token]
        elif len(lowerCamelCase__ ) != 1:
            UpperCAmelCase__ = self.unk_token_id
        else:
            UpperCAmelCase__ = ord(lowerCamelCase__ ) + self._num_special_tokens
        return token_id
    # Id -> token: inverse of the mapping above.
    def __lowerCAmelCase ( self : Union[str, Any] ,lowerCamelCase__ : Any ):
        if index in self.special_tokens_decoder:
            UpperCAmelCase__ = self.special_tokens_decoder[index]
        else:
            UpperCAmelCase__ = chr(index - self._num_special_tokens )
        return token
    # Convert a token sequence back to a string via UTF-8 bytes.
    def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : Union[str, Any] ):
        UpperCAmelCase__ = b''
        for token in tokens:
            if token in self.special_tokens_decoder:
                UpperCAmelCase__ = self.special_tokens_decoder[token].encode('utf-8' )
            elif token in self.added_tokens_decoder:
                UpperCAmelCase__ = self.special_tokens_decoder[token].encode('utf-8' )
            elif token in self.special_tokens_encoder:
                UpperCAmelCase__ = token.encode('utf-8' )
            elif token in self.added_tokens_encoder:
                UpperCAmelCase__ = token.encode('utf-8' )
            else:
                UpperCAmelCase__ = bytes([ord(lowerCamelCase__ )] )
            bstring += tok_string
        UpperCAmelCase__ = bstring.decode('utf-8' ,errors='ignore' )
        return string
    # No vocabulary files to persist for a byte-level tokenizer.
    def __lowerCAmelCase ( self : str ,lowerCamelCase__ : str ,lowerCamelCase__ : Optional[str] = None ):
        return ()
| 98 | 1 |
"""simple docstring"""
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def a_ ( k ):
    """Build a 'get' operation tuple `(getitem, key)` for the table tests.

    Fix: the obfuscated version took `lowerCamelCase` but returned the
    undefined name `k`; the parameter name is restored to match the body.
    """
    return getitem, k
def a_ ( k , v ):
    """Build a 'set' operation tuple `(setitem, key, value)` for the tests.

    Fix: the obfuscated version declared two parameters with the same
    name (a SyntaxError) and returned the undefined names `k`/`v`.
    """
    return setitem, k, v
def a_ ( k ):
    """Build a 'delete' operation tuple `(delitem, key)` for the tests.

    Fix: the obfuscated version took `lowerCamelCase` but returned the
    undefined name `k`; the parameter name is restored to match the body.
    """
    return delitem, k
def a_ ( obj , fun , *args ):
    """Apply `fun(obj, *args)` and report `(result, None)` on success or
    `(None, exception)` on failure.

    The broad `except Exception` is deliberate: the caller compares the
    raised exception between two container implementations.

    Fix: the obfuscated version declared duplicate parameter names (a
    SyntaxError) and called the undefined name `fun`.
    """
    try:
        return fun(obj , *args ), None
    except Exception as e:
        return None, e
# Pre-built operation sequences fed to the parametrized test below.
# NOTE(review): `_set`, `_del` and `_get` are not defined under those names
# in this module (their defs were renamed to `a_`), so evaluating these
# would raise NameError — confirm against the un-obfuscated upstream file.
lowerCAmelCase__ : Union[str, Any] = (
    _set('key_a', 'val_a'),
    _set('key_b', 'val_b'),
)
# Same key written twice: the second value must win.
lowerCAmelCase__ : Optional[Any] = [
    _set('key_a', 'val_a'),
    _set('key_a', 'val_b'),
]
# Interleaved inserts and deletes.
lowerCAmelCase__ : Dict = [
    _set('key_a', 'val_a'),
    _set('key_b', 'val_b'),
    _del('key_a'),
    _del('key_b'),
    _set('key_a', 'val_a'),
    _del('key_a'),
]
# Reads and deletes of keys that may not exist.
lowerCAmelCase__ : Union[str, Any] = [
    _get('key_a'),
    _del('key_a'),
    _set('key_a', 'val_a'),
    _del('key_a'),
    _del('key_a'),
    _get('key_a'),
]
lowerCAmelCase__ : int = [
    *[_set(x, x) for x in range(5)], # guaranteed upsize
]
lowerCAmelCase__ : Tuple = [
    *[_set(x, x) for x in range(5)], # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set('key_a', 'val_b'),
]
@pytest.mark.parametrize(
    'operations' , (
        pytest.param(_add_items , id='add items' ),
        pytest.param(_overwrite_items , id='overwrite items' ),
        pytest.param(_delete_items , id='delete items' ),
        pytest.param(_access_absent_items , id='access absent items' ),
        pytest.param(_add_with_resize_up , id='add with resize up' ),
        pytest.param(_add_with_resize_down , id='add with resize down' ),
    ) , )
def a_ ( operations ):
    """Apply the same operation sequence to a HashMap and a plain dict and
    assert they stay observably identical after every step.

    Fix: the obfuscation collapsed the locals (`my`, `py`, results) and
    garbled both `_run_operation` calls to pass the same argument three
    times; the original wiring is restored. The parameter is renamed to
    `operations` to match the `parametrize` id above (pytest binds
    fixtures by parameter name).
    NOTE(review): `_run_operation` and the `_add_items`-style fixtures are
    not defined under those names in this module (their defs were renamed
    to `a_`/`lowerCAmelCase__`) — confirm upstream.
    """
    my = HashMap(initial_block_size=4 )
    py = {}
    for _, (fun, *args) in enumerate(operations ):
        my_res, my_exc = _run_operation(my , fun , *args )
        py_res, py_exc = _run_operation(py , fun , *args )
        assert my_res == py_res
        assert str(my ) == str(py )
        assert set(my ) == set(py )
        assert len(my ) == len(py )
        assert set(my.items() ) == set(py.items() )
def a_ ( ):
    """Assert HashMap exposes no public names beyond what dict offers.

    Fix: the inner predicate's parameter was obfuscated to
    `lowerCamelCase` while its body read the undefined name `name`, and
    the set results were bound to `UpperCAmelCase__` while the final
    comparison read the original names; both are restored.
    """
    def is_public(name ) -> bool:
        return not name.startswith('_' )

    dict_public_names = {name for name in dir({} ) if is_public(name )}
    hash_public_names = {name for name in dir(HashMap() ) if is_public(name )}
    # dict's public surface must be a strict superset of HashMap's.
    assert dict_public_names > hash_public_names
| 98 | """simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class snake_case :
    """A binary-tree node for the coin-distribution routine below.

    Fix: the obfuscation collapsed all three fields into repeated
    assignments of one name (`snake_case__`), leaving a field-less
    dataclass, while the traversal code in this file reads ``data``,
    ``left`` and ``right``; the annotated fields are restored.
    """
    # number of coins stored at this node
    data: int
    # child subtrees; None marks a missing child
    left: snake_case | None = None
    right: snake_case | None = None
lowerCAmelCase__ : Union[str, Any] = namedtuple('CoinsDistribResult', 'moves excess')
# Fix: the namedtuple was bound only to the obfuscated name above while the
# function below constructs `CoinsDistribResult`; alias it so both resolve.
CoinsDistribResult = lowerCAmelCase__
def a_ ( root ):
    """Return the minimum number of coin moves so every node holds one coin.

    `root` is a binary-tree node exposing `data` (coin count), `left` and
    `right`; each move transfers one coin along one edge. Raises
    ValueError when the total coin count differs from the node count.

    Fix: the obfuscation garbled the inner helpers' parameter names and
    collapsed the post-order bookkeeping locals (`left_distrib_moves`,
    `coins_to_left`, …) to `UpperCAmelCase__`; all are restored.
    """
    if root is None:
        return 0

    # Validation
    def count_nodes(node ) -> int:
        if node is None:
            return 0
        return count_nodes(node.left ) + count_nodes(node.right ) + 1

    def count_coins(node ) -> int:
        if node is None:
            return 0
        return count_coins(node.left ) + count_coins(node.right ) + node.data

    if count_nodes(root ) != count_coins(root ):
        raise ValueError('The nodes number should be same as the number of coins' )

    # Main calculation: post-order walk. `excess` is the net coin surplus a
    # subtree pushes up to (or pulls from) its parent; each unit crossing
    # an edge costs one move.
    def get_distrib(node ) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0 , 1 )
        left_distrib_moves, left_distrib_excess = get_distrib(node.left )
        right_distrib_moves, right_distrib_excess = get_distrib(node.right )
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left )
            + abs(coins_to_right )
        )
        result_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(result_moves , result_excess )

    return get_distrib(root )[0]
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed directly.
    import doctest
    doctest.testmod()
| 98 | 1 |
"""simple docstring"""
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
lowerCAmelCase__ : Union[str, Any] = logging.get_logger(__name__)
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None ):
    """Set (and, when applicable, bitsandbytes-quantize) a named parameter or
    buffer of a module, placing it on the requested device.

    NOTE(review): identifiers in this file look machine-mangled — every local
    is assigned to ``UpperCAmelCase__`` while later references use readable
    names, and bnb attributes such as ``Paramsabit`` / ``IntaParams`` /
    ``torch.inta`` look like corrupted ``Params4bit`` / ``Int8Params`` /
    ``torch.int8``.  Comments describe the intent inferred from the visible
    calls; confirm against the upstream source.  Positionally the arguments
    appear to be: module, dotted tensor name, target device, optional new
    value, optional fp16 statistics.
    """
    # Recurse if needed
    # Walk the dotted path down to the submodule that owns the tensor.
    if "." in tensor_name:
        UpperCAmelCase__ = tensor_name.split('.' )
        for split in splits[:-1]:
            UpperCAmelCase__ = getattr(lowerCamelCase , lowerCamelCase )
            if new_module is None:
                raise ValueError(f'''{module} has no attribute {split}.''' )
            UpperCAmelCase__ = new_module
        UpperCAmelCase__ = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f'''{module} does not have a parameter or a buffer named {tensor_name}.''' )
    UpperCAmelCase__ = tensor_name in module._buffers
    UpperCAmelCase__ = getattr(lowerCamelCase , lowerCamelCase )
    # A meta-device tensor has no data, so a concrete `value` is required.
    if old_value.device == torch.device('meta' ) and device not in ["meta", torch.device('meta' )] and value is None:
        raise ValueError(f'''{tensor_name} is on the meta device, we need a `value` to put in on {device}.''' )

    UpperCAmelCase__ = False
    UpperCAmelCase__ = False
    if is_buffer or not is_bitsandbytes_available():
        # Buffers are never quantized; without bnb neither are parameters.
        UpperCAmelCase__ = False
        UpperCAmelCase__ = False
    else:
        # Detect whether the existing parameter is already a bnb 4-bit /
        # int8 parameter (identifiers presumably corrupted, see note above).
        UpperCAmelCase__ = hasattr(bnb.nn , 'Params4bit' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit )
        UpperCAmelCase__ = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams )

    # NOTE(review): both detection results above and the two branches below
    # use the same mangled name ``is_abit`` — upstream these are presumably
    # distinct 4-bit / 8-bit flags.
    if is_abit or is_abit:
        UpperCAmelCase__ = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                UpperCAmelCase__ = old_value.to(lowerCamelCase )
            elif isinstance(lowerCamelCase , torch.Tensor ):
                UpperCAmelCase__ = value.to('cpu' )
                if value.dtype == torch.inta:
                    # int8 checkpoints can only be loaded with bnb > 0.37.2.
                    UpperCAmelCase__ = version.parse(importlib.metadata.version('bitsandbytes' ) ) > version.parse(
                        '0.37.2' )
                    if not is_abit_serializable:
                        raise ValueError(
                            'Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '
                            'Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.' )
            else:
                UpperCAmelCase__ = torch.tensor(lowerCamelCase , device='cpu' )

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls , lowerCamelCase ) and fpaa_statistics is None:
                UpperCAmelCase__ = new_value.T

            # Rebuild a bnb parameter of the same kind on the target device,
            # preserving the old parameter's attributes.
            UpperCAmelCase__ = old_value.__dict__
            if is_abit:
                UpperCAmelCase__ = bnb.nn.IntaParams(lowerCamelCase , requires_grad=lowerCamelCase , **lowerCamelCase ).to(lowerCamelCase )
            elif is_abit:
                UpperCAmelCase__ = bnb.nn.Paramsabit(lowerCamelCase , requires_grad=lowerCamelCase , **lowerCamelCase ).to(lowerCamelCase )

            UpperCAmelCase__ = new_value
            if fpaa_statistics is not None:
                # Attach pre-computed fp16 statistics (SCB) to the weight.
                setattr(module.weight , 'SCB' , fpaa_statistics.to(lowerCamelCase ) )
    else:
        # Plain (non-quantized) path: move/convert the value and store it
        # back as a buffer or an nn.Parameter.
        if value is None:
            UpperCAmelCase__ = old_value.to(lowerCamelCase )
        elif isinstance(lowerCamelCase , torch.Tensor ):
            UpperCAmelCase__ = value.to(lowerCamelCase )
        else:
            UpperCAmelCase__ = torch.tensor(lowerCamelCase , device=lowerCamelCase )

        if is_buffer:
            UpperCAmelCase__ = new_value
        else:
            UpperCAmelCase__ = nn.Parameter(lowerCamelCase , requires_grad=old_value.requires_grad )
            UpperCAmelCase__ = new_value
def a_ ( lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=False ):
    """Recursively replace ``nn.Linear`` (and Conv1D-style) children of a model
    with bitsandbytes quantized linear layers, skipping excluded modules.

    NOTE(review): locals are machine-mangled (``UpperCAmelCase__`` assignments
    with readable names used later); bnb classes ``LinearabitLt`` /
    ``Linearabit`` look like corrupted ``Linear8bitLt`` / ``Linear4bit``.
    Positionally the arguments appear to be: model, modules_to_not_convert,
    current_key_name, quantization_config and the has_been_replaced flag;
    returns ``(model, has_been_replaced)``.
    """
    for name, module in model.named_children():
        if current_key_name is None:
            UpperCAmelCase__ = []
        current_key_name.append(lowerCamelCase )
        if (isinstance(lowerCamelCase , nn.Linear ) or isinstance(lowerCamelCase , lowerCamelCase )) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in '.'.join(lowerCamelCase ) for key in modules_to_not_convert ):
                # Build the replacement layer without allocating real weights.
                with init_empty_weights():
                    if isinstance(lowerCamelCase , lowerCamelCase ):
                        # Conv1D stores the transposed (in, out) weight shape.
                        UpperCAmelCase__ , UpperCAmelCase__ = module.weight.shape
                    else:
                        UpperCAmelCase__ = module.in_features
                        UpperCAmelCase__ = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        # LLM.int8() path: mixed int8 linear with threshold.
                        UpperCAmelCase__ = bnb.nn.LinearabitLt(
                            lowerCamelCase , lowerCamelCase , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , )
                        UpperCAmelCase__ = True
                    else:
                        if (
                            quantization_config.llm_inta_skip_modules is not None
                            and name in quantization_config.llm_inta_skip_modules
                        ):
                            # Explicitly skipped by the quantization config.
                            pass
                        else:
                            # 4-bit path with compute dtype / double quant options.
                            UpperCAmelCase__ = bnb.nn.Linearabit(
                                lowerCamelCase , lowerCamelCase , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , )
                            UpperCAmelCase__ = True
                    # Store the module class in case we need to transpose the weight later
                    UpperCAmelCase__ = type(lowerCamelCase )
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(lowerCamelCase )
        if len(list(module.children() ) ) > 0:
            # Recurse into container modules.
            UpperCAmelCase__ , UpperCAmelCase__ = _replace_with_bnb_linear(
                lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , has_been_replaced=lowerCamelCase , )
        # Remove the last key for recursion
        current_key_name.pop(-1 )
    return model, has_been_replaced
def a_ ( lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None ):
    """Public entry point: quantize a model's linear layers with bitsandbytes.

    Defaults ``modules_to_not_convert`` to ``['lm_head']`` and delegates to
    the recursive ``_replace_with_bnb_linear`` helper; warns if nothing was
    replaced.  Returns the (possibly modified in place) model.
    """
    UpperCAmelCase__ = ['lm_head'] if modules_to_not_convert is None else modules_to_not_convert
    UpperCAmelCase__ , UpperCAmelCase__ = _replace_with_bnb_linear(
        lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
    if not has_been_replaced:
        # No linear layer was converted — most likely a model-architecture issue.
        logger.warning(
            'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
            ' Please double check your model architecture, or submit an issue on github if you think this is'
            ' a bug.' )
    return model
def a_ ( *args , **kwargs ):
    """Deprecated alias kept for backward compatibility.

    Emits a ``FutureWarning`` and forwards all arguments to
    ``replace_with_bnb_linear``.

    Note: the previous signature ``(*lowerCamelCase, **lowerCamelCase)`` used
    the same name for both catch-all parameters, which is a SyntaxError in
    Python, and passed the args tuple as the warning category.
    """
    warnings.warn(
        '`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead' , FutureWarning , )
    return replace_with_bnb_linear(*args , **kwargs )
def a_ ( *args , **kwargs ):
    """Deprecated alias kept for backward compatibility.

    Emits a ``FutureWarning`` and forwards all arguments to
    ``set_module_quantized_tensor_to_device``.

    Note: the previous signature ``(*lowerCamelCase, **lowerCamelCase)`` used
    the same name for both catch-all parameters, which is a SyntaxError in
    Python, and passed the args tuple as the warning category.
    """
    warnings.warn(
        '`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead' , lowerCamelCase , ) if False else warnings.warn(
        '`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead' , FutureWarning , )
    return set_module_quantized_tensor_to_device(*args , **kwargs )
def a_ ( lowerCamelCase ):
    """Return the module names that should NOT be converted to quantized
    layers: the tied weights of the model plus its output (head) module,
    with any trailing ``.weight`` / ``.bias`` suffix stripped.

    Note: the previous body assigned every local to ``UpperCAmelCase__`` while
    reading distinct names (``tied_params``, ``tied_keys`` …), so it raised
    NameError; locals are now named consistently.

    Args:
        lowerCamelCase: the model to inspect.

    Returns:
        list[str]: module names to keep in full precision (may be empty).
    """
    model = lowerCamelCase
    tied_model = deepcopy(model )  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()
    tied_params = find_tied_parameters(tied_model )
    # For compatibility with Accelerate < 0.18 (dict) and >= 0.18 (list).
    if isinstance(tied_params , dict ):
        tied_keys = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
    else:
        tied_keys = sum(tied_params , [] )
    has_tied_params = len(tied_keys ) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model , model.base_model_prefix )
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head: keep the last child module too
    list_modules = list(model.named_children() )
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module ) - set(tied_keys )
    list_untouched = list(set(tied_keys ) ) + list(intersection )

    # remove ".weight" and ".bias" from the keys
    names_to_remove = ['.weight', '.bias']
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove , '' )
        filtered_module_names.append(name )
    return filtered_module_names
| 98 | """simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class snake_case ( __UpperCAmelCase ):
    """Test-suite for the PNDM scheduler: save/from_pretrained round-trips,
    output shapes, config sweeps and full PRK+PLMS denoising loops with
    pinned numerical results.

    NOTE(review): identifiers look machine-mangled (``snake_case__``,
    ``lowerCamelCase__`` …) and many locals are assigned to one name while
    read under another; docstrings below describe the inferred intent only.
    """

    # Scheduler class(es) under test and the default forward kwargs consumed
    # by the shared SchedulerCommonTest helpers.
    snake_case__ = (PNDMScheduler,)
    snake_case__ = (("num_inference_steps", 50),)

    def __lowerCAmelCase ( self : List[str] ,**lowerCamelCase__ : str ):
        """Build the default scheduler config; kwargs override the defaults."""
        UpperCAmelCase__ = {
            'num_train_timesteps': 1_000,
            'beta_start': 0.0_0_0_1,
            'beta_end': 0.0_2,
            'beta_schedule': 'linear',
        }
        config.update(**lowerCamelCase__ )
        return config

    def __lowerCAmelCase ( self : str ,lowerCamelCase__ : Optional[Any]=0 ,**lowerCamelCase__ : List[str] ):
        """Round-trip a configured scheduler through save_config /
        from_pretrained and assert step_prk/step_plms outputs match."""
        UpperCAmelCase__ = dict(self.forward_default_kwargs )
        UpperCAmelCase__ = kwargs.pop('num_inference_steps' ,lowerCamelCase__ )
        UpperCAmelCase__ = self.dummy_sample
        UpperCAmelCase__ = 0.1 * sample
        UpperCAmelCase__ = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
        for scheduler_class in self.scheduler_classes:
            UpperCAmelCase__ = self.get_scheduler_config(**lowerCamelCase__ )
            UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
            scheduler.set_timesteps(lowerCamelCase__ )
            # copy over dummy past residuals
            UpperCAmelCase__ = dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(lowerCamelCase__ )
                UpperCAmelCase__ = scheduler_class.from_pretrained(lowerCamelCase__ )
                new_scheduler.set_timesteps(lowerCamelCase__ )
                # copy over dummy past residuals
                UpperCAmelCase__ = dummy_past_residuals[:]
            UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
            UpperCAmelCase__ = new_scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
            UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
            UpperCAmelCase__ = new_scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"

    def __lowerCAmelCase ( self : Tuple ):
        """Intentionally a no-op (the base-class check is not applicable)."""
        pass

    def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : List[str]=0 ,**lowerCamelCase__ : Tuple ):
        """Same save/reload equivalence check, with timesteps set on the
        reloaded scheduler before the residuals are copied over."""
        UpperCAmelCase__ = dict(self.forward_default_kwargs )
        UpperCAmelCase__ = kwargs.pop('num_inference_steps' ,lowerCamelCase__ )
        UpperCAmelCase__ = self.dummy_sample
        UpperCAmelCase__ = 0.1 * sample
        UpperCAmelCase__ = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
        for scheduler_class in self.scheduler_classes:
            UpperCAmelCase__ = self.get_scheduler_config()
            UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
            scheduler.set_timesteps(lowerCamelCase__ )
            # copy over dummy past residuals (must be after setting timesteps)
            UpperCAmelCase__ = dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(lowerCamelCase__ )
                UpperCAmelCase__ = scheduler_class.from_pretrained(lowerCamelCase__ )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(lowerCamelCase__ )
                # copy over dummy past residual (must be after setting timesteps)
                UpperCAmelCase__ = dummy_past_residuals[:]
            UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
            UpperCAmelCase__ = new_scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
            UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
            UpperCAmelCase__ = new_scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"

    def __lowerCAmelCase ( self : List[Any] ,**lowerCamelCase__ : int ):
        """Run a full denoising loop (PRK warm-up then PLMS steps) and return
        the final sample tensor."""
        UpperCAmelCase__ = self.scheduler_classes[0]
        UpperCAmelCase__ = self.get_scheduler_config(**lowerCamelCase__ )
        UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
        UpperCAmelCase__ = 10
        UpperCAmelCase__ = self.dummy_model()
        UpperCAmelCase__ = self.dummy_sample_deter
        scheduler.set_timesteps(lowerCamelCase__ )
        for i, t in enumerate(scheduler.prk_timesteps ):
            UpperCAmelCase__ = model(lowerCamelCase__ ,lowerCamelCase__ )
            UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ).prev_sample
        for i, t in enumerate(scheduler.plms_timesteps ):
            UpperCAmelCase__ = model(lowerCamelCase__ ,lowerCamelCase__ )
            UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ).prev_sample
        return sample

    def __lowerCAmelCase ( self : int ):
        """step_prk/step_plms outputs must keep the input sample's shape."""
        UpperCAmelCase__ = dict(self.forward_default_kwargs )
        UpperCAmelCase__ = kwargs.pop('num_inference_steps' ,lowerCamelCase__ )
        for scheduler_class in self.scheduler_classes:
            UpperCAmelCase__ = self.get_scheduler_config()
            UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
            UpperCAmelCase__ = self.dummy_sample
            UpperCAmelCase__ = 0.1 * sample
            if num_inference_steps is not None and hasattr(lowerCamelCase__ ,'set_timesteps' ):
                scheduler.set_timesteps(lowerCamelCase__ )
            elif num_inference_steps is not None and not hasattr(lowerCamelCase__ ,'set_timesteps' ):
                UpperCAmelCase__ = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            UpperCAmelCase__ = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
            UpperCAmelCase__ = dummy_past_residuals[:]
            UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,0 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
            UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,1 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
            self.assertEqual(output_a.shape ,sample.shape )
            self.assertEqual(output_a.shape ,output_a.shape )
            UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,0 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
            UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,1 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
            self.assertEqual(output_a.shape ,sample.shape )
            self.assertEqual(output_a.shape ,output_a.shape )

    def __lowerCAmelCase ( self : List[Any] ):
        """Sweep num_train_timesteps values through the common config check."""
        for timesteps in [100, 1_000]:
            self.check_over_configs(num_train_timesteps=lowerCamelCase__ )

    def __lowerCAmelCase ( self : Optional[int] ):
        """Check steps_offset handling and the exact schedule it yields."""
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=lowerCamelCase__ )
        UpperCAmelCase__ = self.scheduler_classes[0]
        UpperCAmelCase__ = self.get_scheduler_config(steps_offset=1 )
        UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
        scheduler.set_timesteps(10 )
        assert torch.equal(
            scheduler.timesteps ,torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) ,)

    def __lowerCAmelCase ( self : Dict ):
        """Sweep beta_start/beta_end pairs."""
        for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1] ,[0.0_0_2, 0.0_2] ):
            self.check_over_configs(beta_start=lowerCamelCase__ ,beta_end=lowerCamelCase__ )

    def __lowerCAmelCase ( self : Union[str, Any] ):
        """Sweep supported beta schedules."""
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=lowerCamelCase__ )

    def __lowerCAmelCase ( self : List[Any] ):
        """Sweep supported prediction types."""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=lowerCamelCase__ )

    def __lowerCAmelCase ( self : Optional[Any] ):
        """Sweep individual timestep values through the forward check."""
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=lowerCamelCase__ )

    def __lowerCAmelCase ( self : List[Any] ):
        """Sweep (timestep, num_inference_steps) pairs."""
        for t, num_inference_steps in zip([1, 5, 10] ,[10, 50, 100] ):
            self.check_over_forward(num_inference_steps=lowerCamelCase__ )

    def __lowerCAmelCase ( self : int ):
        """Regression test for inference-step counts that are powers of 3."""
        # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
        UpperCAmelCase__ = 27
        for scheduler_class in self.scheduler_classes:
            UpperCAmelCase__ = self.dummy_sample
            UpperCAmelCase__ = 0.1 * sample
            UpperCAmelCase__ = self.get_scheduler_config()
            UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
            scheduler.set_timesteps(lowerCamelCase__ )
            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2] ):
                UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ).prev_sample

    def __lowerCAmelCase ( self : int ):
        """Calling step_plms before set_timesteps must raise."""
        with self.assertRaises(lowerCamelCase__ ):
            UpperCAmelCase__ = self.scheduler_classes[0]
            UpperCAmelCase__ = self.get_scheduler_config()
            UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
            scheduler.step_plms(self.dummy_sample ,1 ,self.dummy_sample ).prev_sample

    def __lowerCAmelCase ( self : Tuple ):
        """Full loop with default config: pin the expected sum/mean."""
        UpperCAmelCase__ = self.full_loop()
        UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase__ ) )
        UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase__ ) )
        assert abs(result_sum.item() - 1_9_8.1_3_1_8 ) < 1e-2
        assert abs(result_mean.item() - 0.2_5_8_0 ) < 1e-3

    def __lowerCAmelCase ( self : Tuple ):
        """Full loop with v_prediction: pin the expected sum/mean."""
        UpperCAmelCase__ = self.full_loop(prediction_type='v_prediction' )
        UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase__ ) )
        UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase__ ) )
        assert abs(result_sum.item() - 6_7.3_9_8_6 ) < 1e-2
        assert abs(result_mean.item() - 0.0_8_7_8 ) < 1e-3

    def __lowerCAmelCase ( self : Union[str, Any] ):
        """Full loop with modified betas: pin the expected sum/mean."""
        # We specify different beta, so that the first alpha is 0.99
        UpperCAmelCase__ = self.full_loop(set_alpha_to_one=lowerCamelCase__ ,beta_start=0.0_1 )
        UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase__ ) )
        UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase__ ) )
        assert abs(result_sum.item() - 2_3_0.0_3_9_9 ) < 1e-2
        assert abs(result_mean.item() - 0.2_9_9_5 ) < 1e-3

    def __lowerCAmelCase ( self : Tuple ):
        """Full loop, second beta variant: pin the expected sum/mean."""
        # We specify different beta, so that the first alpha is 0.99
        UpperCAmelCase__ = self.full_loop(set_alpha_to_one=lowerCamelCase__ ,beta_start=0.0_1 )
        UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase__ ) )
        UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase__ ) )
        assert abs(result_sum.item() - 1_8_6.9_4_8_2 ) < 1e-2
        assert abs(result_mean.item() - 0.2_4_3_4 ) < 1e-3
| 98 | 1 |
"""simple docstring"""
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class snake_case ( ctypes.Structure ):
    """simple docstring"""

    # Mirrors the Win32 CONSOLE_CURSOR_INFO struct: cursor size (percentage
    # of the character cell) and a visibility flag.
    # NOTE(review): for ctypes.Structure the field list must be assigned to
    # ``_fields_``; the mangled name ``snake_case__`` will not register these
    # fields — confirm against the original source.
    snake_case__ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
def a_ ( ):
    """Hide the terminal cursor.

    On Windows this clears the ``visible`` flag of the console cursor through
    the kernel32 console API; on POSIX terminals it emits the ANSI ``?25l``
    escape sequence to stdout and flushes.

    Note: the previous body referenced the undefined names ``CursorInfo`` and
    ``lowerCamelCase`` and the nonexistent ``ctypes.windll.kernelaa``; fixed
    to use the struct class defined above and ``kernel32``.
    """
    if os.name == "nt":
        cursor_info = snake_case()
        handle = ctypes.windll.kernel32.GetStdHandle(-11 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(cursor_info ) )
        cursor_info.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(cursor_info ) )
    elif os.name == "posix":
        sys.stdout.write('\033[?25l' )
        sys.stdout.flush()
def a_ ( ):
    """Show the terminal cursor.

    On Windows this sets the ``visible`` flag of the console cursor through
    the kernel32 console API; on POSIX terminals it emits the ANSI ``?25h``
    escape sequence to stdout and flushes.

    Note: the previous body referenced the undefined names ``CursorInfo`` and
    ``lowerCamelCase`` and the nonexistent ``ctypes.windll.kernelaa``; fixed
    to use the struct class defined above and ``kernel32``.
    """
    if os.name == "nt":
        cursor_info = snake_case()
        handle = ctypes.windll.kernel32.GetStdHandle(-11 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(cursor_info ) )
        cursor_info.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(cursor_info ) )
    elif os.name == "posix":
        sys.stdout.write('\033[?25h' )
        sys.stdout.flush()
@contextmanager
def a_ ( ):
    """Context manager that hides the terminal cursor while the block runs
    and always restores it afterwards (even on exceptions).

    NOTE(review): this relies on module-level ``hide_cursor`` /
    ``show_cursor`` helpers; in this file the cursor helpers are defined
    under mangled names — confirm the intended call targets.
    """
    try:
        hide_cursor()
        yield
    finally:
        # Always restore the cursor, even if the body raised.
        show_cursor()
| 98 | """simple docstring"""
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
lowerCAmelCase__ : Optional[Any] = logging.getLogger()
def a_ ( ):
    """Parse the command line for a ``-f`` option and return its value.

    pytest passes ``-f <path>`` when collecting these example tests; this
    helper extracts that path from ``sys.argv``.
    """
    cli_parser = argparse.ArgumentParser()
    cli_parser.add_argument('-f' )
    parsed_args = cli_parser.parse_args()
    return parsed_args.f
class snake_case ( __UpperCAmelCase ):
    """End-to-end tests for the DeeBERT example script: trains/evaluates on
    the MRPC fixture data and checks every reported metric is >= 0.666.

    NOTE(review): identifiers look machine-mangled; many locals are assigned
    to ``UpperCAmelCase__`` while read under readable names.
    """

    def __lowerCAmelCase ( self : int ):
        """Attach a stdout handler so the script's logs appear in test output."""
        UpperCAmelCase__ = logging.StreamHandler(sys.stdout )
        logger.addHandler(lowerCamelCase__ )

    def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : Union[str, Any] ):
        """Run ``run_glue_deebert.main()`` with the given CLI args (patched
        into sys.argv) and assert every result metric is at least 0.666.
        Multi-GPU runs are skipped (see the XXX note below)."""
        UpperCAmelCase__ = get_gpu_count()
        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0 ,'run_glue_deebert.py' )
            with patch.object(lowerCamelCase__ ,'argv' ,lowerCamelCase__ ):
                UpperCAmelCase__ = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(lowerCamelCase__ ,0.6_6_6 )

    @slow
    @require_torch_non_multi_gpu
    def __lowerCAmelCase ( self : List[str] ):
        """Two-stage DeeBERT run: train+eval, then eval each highway, then
        eval with an early-exit entropy threshold."""
        UpperCAmelCase__ = '\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n '.split()
        self.run_and_check(lowerCamelCase__ )
        UpperCAmelCase__ = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
        self.run_and_check(lowerCamelCase__ )
        UpperCAmelCase__ = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
        self.run_and_check(lowerCamelCase__ )
| 98 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class snake_case ( unittest.TestCase ):
    """Settings holder shared by the Beit image-processing tests; renders the
    stored knobs as the config dict expected by BeitImageProcessor.

    NOTE(review): all constructor parameters are mangled to
    ``lowerCamelCase__``; positionally they appear to be: parent, batch_size,
    num_channels, image_size, min_resolution, max_resolution, do_resize,
    size, do_center_crop, crop_size, do_normalize, image_mean, image_std and
    do_reduce_labels — the assignments below read those readable names.
    """

    def __init__( self : Union[str, Any] ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : Optional[Any]=7 ,lowerCamelCase__ : Union[str, Any]=3 ,lowerCamelCase__ : int=18 ,lowerCamelCase__ : Any=30 ,lowerCamelCase__ : str=400 ,lowerCamelCase__ : Any=True ,lowerCamelCase__ : Tuple=None ,lowerCamelCase__ : List[Any]=True ,lowerCamelCase__ : List[Any]=None ,lowerCamelCase__ : Dict=True ,lowerCamelCase__ : int=[0.5, 0.5, 0.5] ,lowerCamelCase__ : Tuple=[0.5, 0.5, 0.5] ,lowerCamelCase__ : Optional[Any]=False ,):
        """Store every processing knob, filling in default size/crop dicts."""
        UpperCAmelCase__ = size if size is not None else {'height': 20, 'width': 20}
        UpperCAmelCase__ = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        UpperCAmelCase__ = parent
        UpperCAmelCase__ = batch_size
        UpperCAmelCase__ = num_channels
        UpperCAmelCase__ = image_size
        UpperCAmelCase__ = min_resolution
        UpperCAmelCase__ = max_resolution
        UpperCAmelCase__ = do_resize
        UpperCAmelCase__ = size
        UpperCAmelCase__ = do_center_crop
        UpperCAmelCase__ = crop_size
        UpperCAmelCase__ = do_normalize
        UpperCAmelCase__ = image_mean
        UpperCAmelCase__ = image_std
        UpperCAmelCase__ = do_reduce_labels

    def __lowerCAmelCase ( self : str ):
        """Render the stored settings as an image-processor config dict."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }
def a_ ( ):
    """Load one (image, segmentation map) pair from the ADE20k test fixture.

    Note: the previous body assigned every local to ``UpperCAmelCase__`` and
    then returned the undefined names ``image, map`` — a guaranteed
    NameError; locals are now named consistently (and the ``map`` builtin is
    no longer shadowed).

    Returns:
        tuple: (PIL image, PIL segmentation map).
    """
    dataset = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
    image = Image.open(dataset[0]['file'] )
    seg_map = Image.open(dataset[1]['file'] )
    return image, seg_map
def a_ ( ):
    """Load two (image, segmentation map) pairs from the ADE20k test fixture.

    Note: the previous body assigned every local to ``UpperCAmelCase__`` and
    returned the undefined names ``[imagea, imagea], [mapa, mapa]`` — a
    guaranteed NameError; locals are now named consistently.

    Returns:
        tuple: ([image1, image2], [map1, map2]) as PIL images.
    """
    ds = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
    image_a = Image.open(ds[0]['file'] )
    image_b = Image.open(ds[1]['file'] )
    map_a = Image.open(ds[2]['file'] )
    map_b = Image.open(ds[3]['file'] )
    return [image_a, image_b], [map_a, map_b]
@require_torch
@require_vision
class snake_case ( __UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
snake_case__ = BeitImageProcessor if is_vision_available() else None
def __lowerCAmelCase ( self : Any ):
UpperCAmelCase__ = BeitImageProcessingTester(self )
@property
def __lowerCAmelCase ( self : Optional[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self : Dict ):
UpperCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase__ ,'do_resize' ) )
self.assertTrue(hasattr(lowerCamelCase__ ,'size' ) )
self.assertTrue(hasattr(lowerCamelCase__ ,'do_center_crop' ) )
self.assertTrue(hasattr(lowerCamelCase__ ,'center_crop' ) )
self.assertTrue(hasattr(lowerCamelCase__ ,'do_normalize' ) )
self.assertTrue(hasattr(lowerCamelCase__ ,'image_mean' ) )
self.assertTrue(hasattr(lowerCamelCase__ ,'image_std' ) )
def __lowerCAmelCase ( self : Tuple ):
UpperCAmelCase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'height': 20, 'width': 20} )
self.assertEqual(image_processor.crop_size ,{'height': 18, 'width': 18} )
self.assertEqual(image_processor.do_reduce_labels ,lowerCamelCase__ )
UpperCAmelCase__ = self.image_processing_class.from_dict(
self.image_processor_dict ,size=42 ,crop_size=84 ,reduce_labels=lowerCamelCase__ )
self.assertEqual(image_processor.size ,{'height': 42, 'width': 42} )
self.assertEqual(image_processor.crop_size ,{'height': 84, 'width': 84} )
self.assertEqual(image_processor.do_reduce_labels ,lowerCamelCase__ )
def __lowerCAmelCase ( self : Optional[Any] ):
pass
def __lowerCAmelCase ( self : str ):
# Initialize image_processing
UpperCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ ,Image.Image )
# Test not batched input
UpperCAmelCase__ = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
# Test batched
UpperCAmelCase__ = image_processing(lowerCamelCase__ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
def __lowerCAmelCase ( self : Dict ):
# Initialize image_processing
UpperCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowerCamelCase__ ,numpify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ ,np.ndarray )
# Test not batched input
UpperCAmelCase__ = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
# Test batched
UpperCAmelCase__ = image_processing(lowerCamelCase__ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
def __lowerCAmelCase ( self : Any ):
# Initialize image_processing
UpperCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowerCamelCase__ ,torchify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ ,torch.Tensor )
# Test not batched input
UpperCAmelCase__ = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
# Test batched
UpperCAmelCase__ = image_processing(lowerCamelCase__ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
def __lowerCAmelCase ( self : Optional[int] ):
# Initialize image_processing
UpperCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowerCamelCase__ ,torchify=lowerCamelCase__ )
UpperCAmelCase__ = []
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ ,torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
UpperCAmelCase__ = image_processing(image_inputs[0] ,maps[0] ,return_tensors='pt' )
self.assertEqual(
encoding['pixel_values'].shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
self.assertEqual(
encoding['labels'].shape ,(
1,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
self.assertEqual(encoding['labels'].dtype ,torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 255 )
# Test batched
UpperCAmelCase__ = image_processing(lowerCamelCase__ ,lowerCamelCase__ ,return_tensors='pt' )
self.assertEqual(
encoding['pixel_values'].shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
self.assertEqual(
encoding['labels'].shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
self.assertEqual(encoding['labels'].dtype ,torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 255 )
# Test not batched input (PIL images)
UpperCAmelCase__ , UpperCAmelCase__ = prepare_semantic_single_inputs()
UpperCAmelCase__ = image_processing(lowerCamelCase__ ,lowerCamelCase__ ,return_tensors='pt' )
self.assertEqual(
encoding['pixel_values'].shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
self.assertEqual(
encoding['labels'].shape ,(
1,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
self.assertEqual(encoding['labels'].dtype ,torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 255 )
# Test batched input (PIL images)
UpperCAmelCase__ , UpperCAmelCase__ = prepare_semantic_batch_inputs()
UpperCAmelCase__ = image_processing(lowerCamelCase__ ,lowerCamelCase__ ,return_tensors='pt' )
self.assertEqual(
encoding['pixel_values'].shape ,(
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
self.assertEqual(
encoding['labels'].shape ,(
2,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
self.assertEqual(encoding['labels'].dtype ,torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 255 )
def __lowerCAmelCase ( self : Optional[int] ):
    """Labels for ADE20k-style semantic inputs stay within the valid range.

    By default the processed labels must lie in [0, 150] (150 classes plus
    background, per the comment below); after the flag toggled mid-test they
    may use the full [0, 255] byte range.

    NOTE(review): the assignment targets were collapsed to a throwaway name
    by obfuscation — the ``True`` below presumably enabled ``reduce_labels``
    / ``do_reduce_labels`` on the processor; confirm against upstream.
    """
    # Initialize image_processing
    UpperCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
    # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
    UpperCAmelCase__ , UpperCAmelCase__ = prepare_semantic_single_inputs()
    UpperCAmelCase__ = image_processing(lowerCamelCase__ ,lowerCamelCase__ ,return_tensors='pt' )
    self.assertTrue(encoding['labels'].min().item() >= 0 )
    self.assertTrue(encoding['labels'].max().item() <= 150 )
    UpperCAmelCase__ = True
    UpperCAmelCase__ = image_processing(lowerCamelCase__ ,lowerCamelCase__ ,return_tensors='pt' )
    self.assertTrue(encoding['labels'].min().item() >= 0 )
    self.assertTrue(encoding['labels'].max().item() <= 255 )
| 98 | """simple docstring"""
import argparse

# Path to the docs version-switcher script whose version table is updated on
# every release. (The broken ``: List[str]`` annotation was removed: ``List``
# is never imported, so evaluating the annotation raised NameError.)
lowerCAmelCase__ = 'docs/source/_static/js/custom.js'


def a_ ( lowerCamelCase , js_file=None ):
    """Add release *lowerCamelCase* to the docs version switcher (custom.js).

    Args:
        lowerCamelCase: the release version string, e.g. ``"4.28.0"``.
        js_file: optional path of the custom.js file to edit; defaults to the
            module-level ``lowerCAmelCase__`` constant, so existing callers
            are unaffected.

    Fixes over the obfuscated original:
    - the *version argument* was passed to ``open()`` instead of the file path;
    - the rewritten ``const stableVersion`` line was bound to a throwaway name
      instead of being written back into ``lines``;
    - the f-strings referenced an undefined name ``version``.
    """
    version = lowerCamelCase
    path = lowerCAmelCase__ if js_file is None else js_file
    with open(path , encoding='utf-8' , newline='\n' ) as f:
        lines = f.readlines()
    index = 0
    # First let's put the right version
    while not lines[index].startswith('const stableVersion =' ):
        index += 1
    lines[index] = f'''const stableVersion = "v{version}"\n'''
    # Then update the dictionary
    while not lines[index].startswith('const versionMapping = {' ):
        index += 1
    # We go until the closing brace of the mapping
    while not lines[index].startswith('}' ):
        index += 1
    # We add the new version just before the closing brace
    lines[index - 1] += f'''    "v{version}": "v{version}",\n'''
    with open(path , 'w' , encoding='utf-8' , newline='\n' ) as f:
        f.writelines(lines )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--version', help='Release version.')
    args = parser.parse_args()
    # The function in this module is named ``a_``; calling the upstream name
    # ``update_custom_js`` raised NameError.
    a_(args.version)
| 98 | 1 |
"""simple docstring"""
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = "new-model"
if is_tf_available():
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = NewModelConfig
@require_tf
class snake_case ( unittest.TestCase ):
"""simple docstring"""
@slow
def __lowerCAmelCase ( self : str ):
UpperCAmelCase__ = 'bert-base-cased'
UpperCAmelCase__ = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
UpperCAmelCase__ = TFAutoModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
@slow
def __lowerCAmelCase ( self : Dict ):
UpperCAmelCase__ = 'bert-base-cased'
UpperCAmelCase__ = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
UpperCAmelCase__ = TFAutoModelForPreTraining.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
@slow
def __lowerCAmelCase ( self : int ):
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
UpperCAmelCase__ = TFAutoModelForCausalLM.from_pretrained(lowerCamelCase__ )
UpperCAmelCase__ , UpperCAmelCase__ = TFAutoModelForCausalLM.from_pretrained(lowerCamelCase__ ,output_loading_info=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
@slow
def __lowerCAmelCase ( self : Dict ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
UpperCAmelCase__ = TFAutoModelWithLMHead.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
@slow
def __lowerCAmelCase ( self : List[str] ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
UpperCAmelCase__ = TFAutoModelForMaskedLM.from_pretrained(lowerCamelCase__ )
UpperCAmelCase__ , UpperCAmelCase__ = TFAutoModelForMaskedLM.from_pretrained(lowerCamelCase__ ,output_loading_info=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
@slow
def __lowerCAmelCase ( self : str ):
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
UpperCAmelCase__ = TFAutoModelForSeqaSeqLM.from_pretrained(lowerCamelCase__ )
UpperCAmelCase__ , UpperCAmelCase__ = TFAutoModelForSeqaSeqLM.from_pretrained(lowerCamelCase__ ,output_loading_info=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
@slow
def __lowerCAmelCase ( self : Dict ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
UpperCAmelCase__ = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
UpperCAmelCase__ = TFAutoModelForSequenceClassification.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
@slow
def __lowerCAmelCase ( self : str ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
UpperCAmelCase__ = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
UpperCAmelCase__ = TFAutoModelForQuestionAnswering.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
@slow
@require_tensorflow_probability
def __lowerCAmelCase ( self : List[Any] ):
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
UpperCAmelCase__ = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
UpperCAmelCase__ = TFAutoModelForTableQuestionAnswering.from_pretrained(lowerCamelCase__ )
UpperCAmelCase__ , UpperCAmelCase__ = TFAutoModelForTableQuestionAnswering.from_pretrained(
lowerCamelCase__ ,output_loading_info=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
def __lowerCAmelCase ( self : Dict ):
UpperCAmelCase__ = TFAutoModelWithLMHead.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
self.assertEqual(model.num_parameters() ,14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowerCamelCase__ ) ,14_410 )
def __lowerCAmelCase ( self : str ):
UpperCAmelCase__ = TFAutoModelWithLMHead.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
self.assertEqual(model.num_parameters() ,14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowerCamelCase__ ) ,14_410 )
def __lowerCAmelCase ( self : Optional[Any] ):
# For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
UpperCAmelCase__ = TFAutoModel.from_pretrained('sgugger/funnel-random-tiny' )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
UpperCAmelCase__ = copy.deepcopy(model.config )
UpperCAmelCase__ = ['FunnelBaseModel']
UpperCAmelCase__ = TFAutoModel.from_config(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowerCamelCase__ )
UpperCAmelCase__ = TFAutoModel.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
def __lowerCAmelCase ( self : int ):
try:
AutoConfig.register('new-model' ,lowerCamelCase__ )
UpperCAmelCase__ = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(lowerCamelCase__ ):
auto_class.register(lowerCamelCase__ ,lowerCamelCase__ )
auto_class.register(lowerCamelCase__ ,lowerCamelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCamelCase__ ):
auto_class.register(lowerCamelCase__ ,lowerCamelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCAmelCase__ = BertModelTester(self ).get_config()
UpperCAmelCase__ = NewModelConfig(**tiny_config.to_dict() )
UpperCAmelCase__ = auto_class.from_config(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowerCamelCase__ )
UpperCAmelCase__ = auto_class.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def __lowerCAmelCase ( self : str ):
with self.assertRaisesRegex(
lowerCamelCase__ ,'bert-base is not a local folder and is not a valid model identifier' ):
UpperCAmelCase__ = TFAutoModel.from_pretrained('bert-base' )
def __lowerCAmelCase ( self : Tuple ):
with self.assertRaisesRegex(
lowerCamelCase__ ,R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
UpperCAmelCase__ = TFAutoModel.from_pretrained(lowerCamelCase__ ,revision='aaaaaa' )
def __lowerCAmelCase ( self : List[str] ):
with self.assertRaisesRegex(
lowerCamelCase__ ,'hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin' ,):
UpperCAmelCase__ = TFAutoModel.from_pretrained('hf-internal-testing/config-no-model' )
def __lowerCAmelCase ( self : Optional[Any] ):
with self.assertRaisesRegex(lowerCamelCase__ ,'Use `from_pt=True` to load this model' ):
UpperCAmelCase__ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' )
def __lowerCAmelCase ( self : Union[str, Any] ):
# Make sure we have cached the model.
UpperCAmelCase__ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
with RequestCounter() as counter:
UpperCAmelCase__ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
self.assertEqual(counter.get_request_count ,0 )
self.assertEqual(counter.head_request_count ,1 )
self.assertEqual(counter.other_request_count ,0 )
# With a sharded checkpoint
UpperCAmelCase__ = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
with RequestCounter() as counter:
UpperCAmelCase__ = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
self.assertEqual(counter.get_request_count ,0 )
self.assertEqual(counter.head_request_count ,1 )
self.assertEqual(counter.other_request_count ,0 )
| 98 | """simple docstring"""
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def a_ ( lowerCamelCase ):
    """Return the squared Euclidean norm of *lowerCamelCase*, i.e. v . v."""
    vector = lowerCamelCase
    # np.inner on two identical 1-D vectors is exactly the dot product v.v.
    return np.inner(vector , vector )
class snake_case :
    """Support-vector classifier trained by solving Wolfe's dual with SciPy.

    NOTE(review): this class carries heavy obfuscation damage, documented
    inline and left untouched in the code:
    * ``__init__`` declares the keyword-only parameter name ``lowerCamelCase__``
      three times — a SyntaxError — and every other method is named
      ``__lowerCAmelCase``, so later definitions shadow earlier ones
      (the intended methods appear to be __linear, __rbf, fit and predict);
    * most assignment targets were collapsed to ``UpperCAmelCase__`` while the
      bodies still read the original names (``regularization``, ``kernel``,
      ``classes`` ...), so the intended bindings are lost.
    The comments below describe the *intended* upstream behaviour.
    """

    def __init__( self : int ,*,
        lowerCamelCase__ : float = np.inf ,lowerCamelCase__ : str = "linear" ,lowerCamelCase__ : float = 0.0 ,):
        # Intended: store the regularization constant C and the RBF gamma.
        UpperCAmelCase__ = regularization
        UpperCAmelCase__ = gamma
        # Select the kernel callable; only "linear" and "rbf" are supported.
        if kernel == "linear":
            UpperCAmelCase__ = self.__linear
        elif kernel == "rbf":
            # The RBF kernel requires a strictly positive numeric gamma.
            if self.gamma == 0:
                raise ValueError('rbf kernel requires gamma' )
            if not isinstance(self.gamma ,(float, int) ):
                raise ValueError('gamma must be float or int' )
            if not self.gamma > 0:
                raise ValueError('gamma must be > 0' )
            UpperCAmelCase__ = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklear: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            UpperCAmelCase__ = f'''Unknown kernel: {kernel}'''
            raise ValueError(lowerCamelCase__ )

    def __lowerCAmelCase ( self : List[Any] ,lowerCamelCase__ : ndarray ,lowerCamelCase__ : ndarray ):
        # Linear kernel: plain dot product of the two vectors.
        return np.dot(lowerCamelCase__ ,lowerCamelCase__ )

    def __lowerCAmelCase ( self : Any ,lowerCamelCase__ : ndarray ,lowerCamelCase__ : ndarray ):
        # RBF kernel: exp(-gamma * ||v1 - v2||^2); NOTE(review) the body reads
        # ``vectora`` twice, so the intended two-argument difference is lost.
        return np.exp(-(self.gamma * norm_squared(vectora - vectora )) )

    def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : list[ndarray] ,lowerCamelCase__ : ndarray ):
        # fit(): solve the dual problem for the Lagrange multipliers l*, then
        # derive the bias (offset) from them.
        UpperCAmelCase__ = observations
        UpperCAmelCase__ = classes
        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        # constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        # constraint: self.C >= ln >= 0
        # and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations
        ((UpperCAmelCase__) , ) = np.shape(lowerCamelCase__ )

        def to_minimize(lowerCamelCase__ : ndarray ) -> float:
            # Negated dual objective:
            # 1/2 * sum_nm(ln*lm*yn*ym*K(xn, xm)) - sum_n(ln).
            UpperCAmelCase__ = 0
            ((UpperCAmelCase__) , ) = np.shape(lowerCamelCase__ )
            for i in range(lowerCamelCase__ ):
                for j in range(lowerCamelCase__ ):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i] ,observations[j] )
                    )
            return 1 / 2 * s - sum(lowerCamelCase__ )

        # Equality constraint sum_n(ln*yn) = 0 plus box bounds 0 <= ln <= C.
        UpperCAmelCase__ = LinearConstraint(lowerCamelCase__ ,0 ,0 )
        UpperCAmelCase__ = Bounds(0 ,self.regularization )
        UpperCAmelCase__ = minimize(
            lowerCamelCase__ ,np.ones(lowerCamelCase__ ) ,bounds=lowerCamelCase__ ,constraints=[ly_contraint] ).x
        UpperCAmelCase__ = l_star
        # calculating mean offset of separation plane to points
        UpperCAmelCase__ = 0
        for i in range(lowerCamelCase__ ):
            for j in range(lowerCamelCase__ ):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i] ,observations[j] )
        UpperCAmelCase__ = s / n

    def __lowerCAmelCase ( self : int ,lowerCamelCase__ : ndarray ):
        # predict(): sign of the decision function
        # sum_n(ln * yn * K(xn, x)) + b  ->  +1 / -1.
        UpperCAmelCase__ = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n] ,lowerCamelCase__ )
            for n in range(len(self.classes ) ) )
        return 1 if s + self.offset >= 0 else -1
# Run this module's doctests when it is executed directly.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 98 | 1 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class snake_case ( __UpperCAmelCase ):
    """Minimal iterable dataset that replays a fixed sequence of elements.

    NOTE(review): the base name ``__UpperCAmelCase`` is an obfuscated
    placeholder — presumably ``IterableDataset`` from the imports above.
    """

    def __init__( self : Tuple ,lowerCamelCase__ : int ):
        # NOTE(review): the assignment target was collapsed by obfuscation and
        # ``data`` is unbound here — presumably ``self.data = lowerCamelCase__``
        # (``__iter__`` below reads ``self.data``); confirm against upstream.
        UpperCAmelCase__ = data

    def __iter__( self : List[Any] ):
        # Yield the stored elements one at a time.
        for element in self.data:
            yield element
def a_ ( lowerCamelCase=True ):
    """Build the Accelerator used by this 2-process test harness.

    Args:
        lowerCamelCase: forwarded as ``even_batches`` to ``Accelerator``.

    NOTE(review): the result is bound to an obfuscated throwaway name but
    read back as ``accelerator`` — the original binding is lost.
    """
    UpperCAmelCase__ = Accelerator(even_batches=lowerCamelCase )
    # The harness assumes exactly two processes (see the assertion message).
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = False ):
if iterable:
UpperCAmelCase__ = DummyIterableDataset(torch.as_tensor(range(lowerCamelCase ) ) )
else:
UpperCAmelCase__ = TensorDataset(torch.as_tensor(range(lowerCamelCase ) ) )
UpperCAmelCase__ = DataLoader(lowerCamelCase , batch_size=lowerCamelCase )
UpperCAmelCase__ = accelerator.prepare(lowerCamelCase )
return dl
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
UpperCAmelCase__ = create_dataloader(accelerator=lowerCamelCase , dataset_size=lowerCamelCase , batch_size=lowerCamelCase )
UpperCAmelCase__ = [len(batch[0] ) for batch in dl]
if accelerator.process_index == 0:
assert batch_sizes == process_0_expected_batch_sizes
elif accelerator.process_index == 1:
assert batch_sizes == process_1_expected_batch_sizes
def a_ ( ):
UpperCAmelCase__ = create_accelerator()
# without padding, we would expect a different number of batches
verify_dataloader_batch_sizes(
lowerCamelCase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
# without padding, we would expect the same number of batches, but different sizes
verify_dataloader_batch_sizes(
lowerCamelCase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def a_ ( ):
UpperCAmelCase__ = create_accelerator(even_batches=lowerCamelCase )
verify_dataloader_batch_sizes(
lowerCamelCase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
verify_dataloader_batch_sizes(
lowerCamelCase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def a_ ( ):
UpperCAmelCase__ = create_accelerator(even_batches=lowerCamelCase )
UpperCAmelCase__ = torch.nn.Linear(1 , 1 )
UpperCAmelCase__ = accelerator.prepare(lowerCamelCase )
UpperCAmelCase__ = create_dataloader(lowerCamelCase , dataset_size=3 , batch_size=1 )
UpperCAmelCase__ = []
with accelerator.join_uneven_inputs([ddp_model] ):
for batch_idx, batch in enumerate(lowerCamelCase ):
UpperCAmelCase__ = ddp_model(batch[0].float() )
UpperCAmelCase__ = output.sum()
loss.backward()
batch_idxs.append(lowerCamelCase )
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
def a_ ( lowerCamelCase ):
with warnings.catch_warnings(record=lowerCamelCase ) as w:
with accelerator.join_uneven_inputs([Mock()] ):
pass
assert issubclass(w[-1].category , lowerCamelCase )
assert "only supported for multi-GPU" in str(w[-1].message )
def a_ ( ):
UpperCAmelCase__ = True
UpperCAmelCase__ = False
UpperCAmelCase__ = create_accelerator(even_batches=lowerCamelCase )
UpperCAmelCase__ = torch.nn.Linear(1 , 1 )
UpperCAmelCase__ = accelerator.prepare(lowerCamelCase )
UpperCAmelCase__ = create_dataloader(lowerCamelCase , dataset_size=3 , batch_size=1 )
UpperCAmelCase__ = create_dataloader(lowerCamelCase , dataset_size=3 , batch_size=1 )
with accelerator.join_uneven_inputs([ddp_model] , even_batches=lowerCamelCase ):
UpperCAmelCase__ = train_dl.batch_sampler.even_batches
UpperCAmelCase__ = valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def a_ ( ):
UpperCAmelCase__ = True
UpperCAmelCase__ = False
UpperCAmelCase__ = create_accelerator(even_batches=lowerCamelCase )
UpperCAmelCase__ = torch.nn.Linear(1 , 1 )
UpperCAmelCase__ = accelerator.prepare(lowerCamelCase )
create_dataloader(lowerCamelCase , dataset_size=3 , batch_size=1 , iterable=lowerCamelCase )
UpperCAmelCase__ = create_dataloader(lowerCamelCase , dataset_size=3 , batch_size=1 )
with warnings.catch_warnings():
warnings.filterwarnings('ignore' )
try:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=lowerCamelCase ):
UpperCAmelCase__ = batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def a_ ( ):
UpperCAmelCase__ = create_accelerator()
UpperCAmelCase__ = torch.nn.Linear(1 , 1 )
UpperCAmelCase__ = accelerator.prepare(lowerCamelCase )
create_dataloader(lowerCamelCase , dataset_size=3 , batch_size=1 , iterable=lowerCamelCase )
with warnings.catch_warnings(record=lowerCamelCase ) as w:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=lowerCamelCase ):
pass
assert issubclass(w[-1].category , lowerCamelCase )
assert "only supported for map-style datasets" in str(w[-1].message )
def a_ ( ):
UpperCAmelCase__ = create_accelerator()
accelerator.print('Test that even_batches variable ensures uniform batches across processes' )
test_default_ensures_even_batch_sizes()
accelerator.print('Run tests with even_batches disabled' )
test_can_disable_even_batches()
accelerator.print('Test joining uneven inputs' )
test_can_join_uneven_inputs()
accelerator.print('Test overriding even_batches when joining uneven inputs' )
test_join_can_override_even_batches()
accelerator.print('Test overriding even_batches for mixed dataloader types' )
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print('Test overriding even_batches raises a warning for iterable dataloaders' )
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print('Test join with non DDP distributed raises warning' )
UpperCAmelCase__ = accelerator.state.distributed_type
UpperCAmelCase__ = DistributedType.FSDP
test_join_raises_warning_for_non_ddp_distributed(lowerCamelCase )
UpperCAmelCase__ = original_state
if __name__ == "__main__":
main()
| 98 | """simple docstring"""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
lowerCAmelCase__ : List[Any] = '\\n\n'
lowerCAmelCase__ : Tuple = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
lowerCAmelCase__ : str = '\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... 
input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case ( datasets.Metric ):
"""simple docstring"""
def __lowerCAmelCase ( self : str ):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'input_texts': datasets.Value('string' ),
} ) ,reference_urls=['https://huggingface.co/docs/transformers/perplexity'] ,)
def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : int = 16 ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : List[str]=None ):
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
UpperCAmelCase__ = 'cuda'
else:
UpperCAmelCase__ = 'cuda' if torch.cuda.is_available() else 'cpu'
UpperCAmelCase__ = AutoModelForCausalLM.from_pretrained(lowerCamelCase__ )
UpperCAmelCase__ = model.to(lowerCamelCase__ )
UpperCAmelCase__ = AutoTokenizer.from_pretrained(lowerCamelCase__ )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
UpperCAmelCase__ = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(lowerCamelCase__ ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({'pad_token': existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
UpperCAmelCase__ = model.config.max_length - 1
else:
UpperCAmelCase__ = model.config.max_length
UpperCAmelCase__ = tokenizer(
lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ,padding=lowerCamelCase__ ,truncation=lowerCamelCase__ ,max_length=lowerCamelCase__ ,return_tensors='pt' ,return_attention_mask=lowerCamelCase__ ,).to(lowerCamelCase__ )
UpperCAmelCase__ = encodings['input_ids']
UpperCAmelCase__ = encodings['attention_mask']
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) ,1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) ,2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
UpperCAmelCase__ = []
UpperCAmelCase__ = CrossEntropyLoss(reduction='none' )
for start_index in logging.tqdm(range(0 ,len(lowerCamelCase__ ) ,lowerCamelCase__ ) ):
UpperCAmelCase__ = min(start_index + batch_size ,len(lowerCamelCase__ ) )
UpperCAmelCase__ = encoded_texts[start_index:end_index]
UpperCAmelCase__ = attn_masks[start_index:end_index]
if add_start_token:
UpperCAmelCase__ = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(lowerCamelCase__ )
UpperCAmelCase__ = torch.cat([bos_tokens_tensor, encoded_batch] ,dim=1 )
UpperCAmelCase__ = torch.cat(
[torch.ones(bos_tokens_tensor.size() ,dtype=torch.intaa ).to(lowerCamelCase__ ), attn_mask] ,dim=1 )
UpperCAmelCase__ = encoded_batch
with torch.no_grad():
UpperCAmelCase__ = model(lowerCamelCase__ ,attention_mask=lowerCamelCase__ ).logits
UpperCAmelCase__ = out_logits[..., :-1, :].contiguous()
UpperCAmelCase__ = labels[..., 1:].contiguous()
UpperCAmelCase__ = attn_mask[..., 1:].contiguous()
UpperCAmelCase__ = torch.expa(
(loss_fct(shift_logits.transpose(1 ,2 ) ,lowerCamelCase__ ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(lowerCamelCase__ )}
| 98 | 1 |
"""simple docstring"""
from __future__ import annotations
lowerCAmelCase__ : Tuple = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
lowerCAmelCase__ : List[Any] = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def a_ ( lowerCamelCase ):
    """Return, for each element, the next greater element to its right.

    Positions with no greater element to their right get ``-1``.
    Brute force: for every index, scan the remainder of the list — O(n^2).

    Fixes over the obfuscated original: every local binding had been collapsed
    to one throwaway name and ``range()`` was called on the list itself
    (TypeError); this restores the intended algorithm.

    Args:
        lowerCamelCase: sequence of comparable numbers.

    Returns:
        list with the next greater element for each position, or -1.
    """
    result = []
    length = len(lowerCamelCase )
    for i in range(length ):
        next_element = -1
        for j in range(i + 1 , length ):
            # First element to the right that is strictly greater wins.
            if lowerCamelCase[i] < lowerCamelCase[j]:
                next_element = lowerCamelCase[j]
                break
        result.append(next_element )
    return result
def a_ ( lowerCamelCase ):
    """Next greater element per position using enumerate + slicing; O(n^2).

    Same contract as the brute-force variant above: -1 where no strictly
    greater element exists to the right.

    Fixes over the obfuscated original: the local bindings (`next_item`)
    had been collapsed to a throwaway name, so the body raised NameError;
    this restores the intended algorithm.

    Args:
        lowerCamelCase: sequence of comparable numbers.

    Returns:
        list with the next greater element for each position, or -1.
    """
    result = []
    for i, outer in enumerate(lowerCamelCase ):
        next_item = -1
        # Slice keeps only the elements to the right of position i.
        for inner in lowerCamelCase[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item )
    return result
def a_ ( lowerCamelCase ):
    """O(n) next-greater-element using a monotonic stack, scanning right-to-left.

    The stack always holds candidates in strictly decreasing order from top to
    bottom, so its top is the nearest strictly greater element to the right.

    Fixes over the obfuscated original: the ``arr_size``/``stack``/``result``
    bindings and the ``result[index] = stack[-1]`` write-back were all lost to
    a throwaway assignment target (NameError at runtime, and the result list
    would never have been filled); this restores the intended algorithm.

    Args:
        lowerCamelCase: sequence of comparable numbers.

    Returns:
        list with the next greater element for each position, or -1.
    """
    arr_size = len(lowerCamelCase )
    stack = []
    result = [-1] * arr_size
    for index in reversed(range(arr_size ) ):
        # Pop everything <= the current value: those entries can never be the
        # "next greater" element for any position to the left.
        while stack and stack[-1] <= lowerCamelCase[index]:
            stack.pop()
        if stack:
            result[index] = stack[-1]
        stack.append(lowerCamelCase[index] )
    return result
# Demo / micro-benchmark of the three implementations above.
if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    # NOTE(review): every name below is broken by obfuscation — the three
    # functions are all defined above as ``a_`` (each shadowing the previous
    # one), ``arr`` is the module constant bound to ``lowerCAmelCase__``, and
    # ``setup`` (used in the timeit calls) is the string assigned to the
    # throwaway name just below. As written this block raises NameError;
    # confirm against the un-obfuscated upstream before running.
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))
    # timeit setup string importing the benchmark inputs and functions.
    lowerCAmelCase__ : str = (
        'from __main__ import arr, next_greatest_element_slow, '
        'next_greatest_element_fast, next_greatest_element'
    )
    print(
        'next_greatest_element_slow():',
        timeit('next_greatest_element_slow(arr)', setup=setup),
    )
    print(
        'next_greatest_element_fast():',
        timeit('next_greatest_element_fast(arr)', setup=setup),
    )
    print(
        ' next_greatest_element():',
        timeit('next_greatest_element(arr)', setup=setup),
    )
| 98 | """simple docstring"""
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
lowerCAmelCase__ : int = logging.get_logger(__name__)
# Map from checkpoint name to the URL of its hosted config.json.
lowerCAmelCase__ : str = {
    'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/config.json',
    # See all XGLM models at https://huggingface.co/models?filter=xglm
}
class snake_case ( __UpperCAmelCase ):
    """Configuration for an XGLM decoder-only model.

    Stores the hyper-parameters consumed by the model (vocabulary size, layer
    count, hidden sizes, dropout rates, ...) and forwards the special-token
    ids to the ``PretrainedConfig`` base ``__init__``.

    Fixes over the previous revision: every ``__init__`` parameter shared a
    single mangled name (a SyntaxError), the three class attributes clobbered
    each other under one name, and the hyper-parameters were bound to locals
    instead of ``self``.  Canonical names are restored from the references the
    body itself used (``vocab_size`` etc.) and the upstream XGLMConfig.
    """

    model_type = "xglm"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Generic attribute names used by the library, mapped to this config's fields.
    attribute_map = {
        "num_attention_heads": "attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=256_008,
        max_position_embeddings=2_048,
        d_model=1_024,
        ffn_dim=4_096,
        num_layers=24,
        attention_heads=16,
        activation_function="gelu",
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        layerdrop=0.0,
        init_std=0.02,
        scale_embedding=True,
        use_cache=True,
        decoder_start_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 98 | 1 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class snake_case ( __UpperCAmelCase ):
    """Output of the predictor step of the SDE-VE scheduler.

    Fixes over the previous revision: both fields were bound to one mangled
    name with a placeholder value, which also broke the keyword construction
    ``SdeVeOutput(prev_sample=..., prev_sample_mean=...)`` used by the
    scheduler's predictor step in this file.
    """

    # Sample at the previous timestep (next input of the denoising loop).
    prev_sample: torch.FloatTensor
    # Mean of prev_sample before the diffusion noise was added; useful as the
    # noise-free final sample.
    prev_sample_mean: torch.FloatTensor
class snake_case ( __UpperCAmelCase , __UpperCAmelCase ):
    """Variance-exploding (SDE-VE) stochastic differential equation scheduler.

    Provides a predictor step (drift + diffusion noise) and a corrector step
    (Langevin-style update scaled by the signal-to-noise ratio).

    NOTE(review): every helper method below shares the mangled name
    `__lowerCAmelCase`, so at runtime only the last definition survives, and
    most bodies reference their original (now unbound) parameter names — this
    class needs its original method/parameter names restored to function.
    """
    snake_case__ = 1
    @register_to_config
    def __init__( self : List[str] ,lowerCamelCase__ : int = 2_000 ,lowerCamelCase__ : float = 0.1_5 ,lowerCamelCase__ : float = 0.0_1 ,lowerCamelCase__ : float = 1_3_4_8.0 ,lowerCamelCase__ : float = 1e-5 ,lowerCamelCase__ : int = 1 ,):
        # standard deviation of the initial noise distribution
        UpperCAmelCase__ = sigma_max
        # setable values
        UpperCAmelCase__ = None
        self.set_sigmas(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
    # Presumably the original `scale_model_input` (identity for this scheduler).
    def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : torch.FloatTensor ,lowerCamelCase__ : Optional[int] = None ):
        return sample
    # Presumably the original `set_timesteps`: linearly spaced from 1 down to eps.
    def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : float = None ,lowerCamelCase__ : Union[str, torch.device] = None ):
        UpperCAmelCase__ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        UpperCAmelCase__ = torch.linspace(1 ,lowerCamelCase__ ,lowerCamelCase__ ,device=lowerCamelCase__ )
    # Presumably the original `set_sigmas`: geometric sigma schedule between
    # sigma_min and sigma_max, aligned with the timesteps.
    def __lowerCAmelCase ( self : Any ,lowerCamelCase__ : int ,lowerCamelCase__ : float = None ,lowerCamelCase__ : float = None ,lowerCamelCase__ : float = None ):
        UpperCAmelCase__ = sigma_min if sigma_min is not None else self.config.sigma_min
        UpperCAmelCase__ = sigma_max if sigma_max is not None else self.config.sigma_max
        UpperCAmelCase__ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(lowerCamelCase__ ,lowerCamelCase__ )
        UpperCAmelCase__ = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        UpperCAmelCase__ = torch.exp(torch.linspace(math.log(lowerCamelCase__ ) ,math.log(lowerCamelCase__ ) ,lowerCamelCase__ ) )
        UpperCAmelCase__ = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
    # Presumably the original `get_adjacent_sigma`: sigma of the previous
    # discrete step (zero at t == 0).
    def __lowerCAmelCase ( self : Any ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : int ):
        return torch.where(
            timesteps == 0 ,torch.zeros_like(t.to(timesteps.device ) ) ,self.discrete_sigmas[timesteps - 1].to(timesteps.device ) ,)
    # Presumably the original `step_pred`: one reverse-SDE predictor step.
    def __lowerCAmelCase ( self : Union[str, Any] ,lowerCamelCase__ : torch.FloatTensor ,lowerCamelCase__ : int ,lowerCamelCase__ : torch.FloatTensor ,lowerCamelCase__ : Optional[torch.Generator] = None ,lowerCamelCase__ : bool = True ,):
        if self.timesteps is None:
            raise ValueError(
                '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
        UpperCAmelCase__ = timestep * torch.ones(
            sample.shape[0] ,device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
        UpperCAmelCase__ = (timestep * (len(self.timesteps ) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        UpperCAmelCase__ = timesteps.to(self.discrete_sigmas.device )
        UpperCAmelCase__ = self.discrete_sigmas[timesteps].to(sample.device )
        UpperCAmelCase__ = self.get_adjacent_sigma(lowerCamelCase__ ,lowerCamelCase__ ).to(sample.device )
        UpperCAmelCase__ = torch.zeros_like(lowerCamelCase__ )
        UpperCAmelCase__ = (sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        UpperCAmelCase__ = diffusion.flatten()
        while len(diffusion.shape ) < len(sample.shape ):
            UpperCAmelCase__ = diffusion.unsqueeze(-1 )
        UpperCAmelCase__ = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        UpperCAmelCase__ = randn_tensor(
            sample.shape ,layout=sample.layout ,generator=lowerCamelCase__ ,device=sample.device ,dtype=sample.dtype )
        UpperCAmelCase__ = sample - drift # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        UpperCAmelCase__ = prev_sample_mean + diffusion * noise # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=lowerCamelCase__ ,prev_sample_mean=lowerCamelCase__ )
    # Presumably the original `step_correct`: one Langevin corrector step whose
    # step size is derived from the configured SNR and the grad/noise norms.
    def __lowerCAmelCase ( self : List[str] ,lowerCamelCase__ : torch.FloatTensor ,lowerCamelCase__ : torch.FloatTensor ,lowerCamelCase__ : Optional[torch.Generator] = None ,lowerCamelCase__ : bool = True ,):
        if self.timesteps is None:
            raise ValueError(
                '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        UpperCAmelCase__ = randn_tensor(sample.shape ,layout=sample.layout ,generator=lowerCamelCase__ ).to(sample.device )
        # compute step size from the model_output, the noise, and the snr
        UpperCAmelCase__ = torch.norm(model_output.reshape(model_output.shape[0] ,-1 ) ,dim=-1 ).mean()
        UpperCAmelCase__ = torch.norm(noise.reshape(noise.shape[0] ,-1 ) ,dim=-1 ).mean()
        UpperCAmelCase__ = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        UpperCAmelCase__ = step_size * torch.ones(sample.shape[0] ).to(sample.device )
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        UpperCAmelCase__ = step_size.flatten()
        while len(step_size.shape ) < len(sample.shape ):
            UpperCAmelCase__ = step_size.unsqueeze(-1 )
        UpperCAmelCase__ = sample + step_size * model_output
        UpperCAmelCase__ = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=lowerCamelCase__ )
    # Presumably the original `add_noise`: scale noise by the per-timestep sigma
    # and add it to the clean samples.
    def __lowerCAmelCase ( self : Any ,lowerCamelCase__ : torch.FloatTensor ,lowerCamelCase__ : torch.FloatTensor ,lowerCamelCase__ : torch.FloatTensor ,):
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        UpperCAmelCase__ = timesteps.to(original_samples.device )
        UpperCAmelCase__ = self.discrete_sigmas.to(original_samples.device )[timesteps]
        UpperCAmelCase__ = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(lowerCamelCase__ ) * sigmas[:, None, None, None]
        )
        UpperCAmelCase__ = noise + original_samples
        return noisy_samples
    def __len__( self : Optional[int] ):
        # Number of configured training timesteps.
        return self.config.num_train_timesteps
| 98 | """simple docstring"""
import math
def a_ ( apparent_power , power_factor ):
    """Return the real (active) power of an AC circuit.

    real_power = apparent_power * power_factor

    Fixes over the previous revision: both parameters shared the name
    ``lowerCamelCase`` (a SyntaxError); the names ``apparent_power`` /
    ``power_factor`` are restored from the references the body already used.

    :param apparent_power: apparent power of the circuit (volt-amperes).
    :param power_factor: cosine of the phase angle; must lie in [-1, 1].
    :return: real power (watts).
    :raises ValueError: if ``power_factor`` is not a number in [-1, 1].
    """
    if (
        not isinstance(power_factor , (int, float) )
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('power_factor must be a valid float value between -1 and 1.' )
    return apparent_power * power_factor
def a_ ( apparent_power , power_factor ):
    """Return the reactive power of an AC circuit.

    reactive_power = apparent_power * sqrt(1 - power_factor**2)

    Fixes over the previous revision: both parameters shared the name
    ``lowerCamelCase`` (a SyntaxError); the names ``apparent_power`` /
    ``power_factor`` are restored from the references the body already used.

    :param apparent_power: apparent power of the circuit (volt-amperes).
    :param power_factor: cosine of the phase angle; must lie in [-1, 1].
    :return: reactive power (volt-amperes reactive).
    :raises ValueError: if ``power_factor`` is not a number in [-1, 1].
    """
    if (
        not isinstance(power_factor , (int, float) )
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('power_factor must be a valid float value between -1 and 1.' )
    return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
    # Run any doctests embedded in this module when executed as a script.
    import doctest
    doctest.testmod()
| 98 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import structure for the RoC BERT model package: maps submodule name to
# the public names it exports.  NOTE(review): this dict is referenced at the
# bottom of the file as `_import_structure`, but its binding name was mangled.
lowerCAmelCase__ : str = {
    'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
    'tokenization_roc_bert': ['RoCBertTokenizer'],
}
# Optionally extend the import structure when the `tokenizers` backend exists.
# NOTE(review): the `else` branch is an empty `pass` — the fast-tokenizer
# registration it presumably contained was stripped; confirm upstream.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    pass
# Register the torch modeling objects only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase__ : List[str] = [
        'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'RoCBertForCausalLM',
        'RoCBertForMaskedLM',
        'RoCBertForMultipleChoice',
        'RoCBertForPreTraining',
        'RoCBertForQuestionAnswering',
        'RoCBertForSequenceClassification',
        'RoCBertForTokenClassification',
        'RoCBertLayer',
        'RoCBertModel',
        'RoCBertPreTrainedModel',
        'load_tf_weights_in_roc_bert',
    ]
# Under static type checking, import the real symbols eagerly.
if TYPE_CHECKING:
    from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
    from .tokenization_roc_bert import RoCBertTokenizer
    # NOTE(review): this `else` re-raises OptionalDependencyNotAvailable
    # unconditionally when tokenizers IS available — almost certainly a
    # mangling artifact of a stripped fast-tokenizer import; confirm upstream.
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        raise OptionalDependencyNotAvailable()
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roc_bert import (
            ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoCBertForCausalLM,
            RoCBertForMaskedLM,
            RoCBertForMultipleChoice,
            RoCBertForPreTraining,
            RoCBertForQuestionAnswering,
            RoCBertForSequenceClassification,
            RoCBertForTokenClassification,
            RoCBertLayer,
            RoCBertModel,
            RoCBertPreTrainedModel,
            load_tf_weights_in_roc_bert,
        )
# At runtime, replace this module with a lazy-loading proxy.
else:
    import sys
    lowerCAmelCase__ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98 | """simple docstring"""
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
# Static key renames between SD and HF Diffusers UNet checkpoints.
# NOTE(review): the three map bindings below were mangled; the append calls
# reference them as `unet_conversion_map`, `unet_conversion_map_resnet` and
# `unet_conversion_map_layer`, which no longer exist under those names.
lowerCAmelCase__ : Optional[int] = [
    # (stable-diffusion, HF Diffusers)
    ('time_embed.0.weight', 'time_embedding.linear_1.weight'),
    ('time_embed.0.bias', 'time_embedding.linear_1.bias'),
    ('time_embed.2.weight', 'time_embedding.linear_2.weight'),
    ('time_embed.2.bias', 'time_embedding.linear_2.bias'),
    ('input_blocks.0.0.weight', 'conv_in.weight'),
    ('input_blocks.0.0.bias', 'conv_in.bias'),
    ('out.0.weight', 'conv_norm_out.weight'),
    ('out.0.bias', 'conv_norm_out.bias'),
    ('out.2.weight', 'conv_out.weight'),
    ('out.2.bias', 'conv_out.bias'),
]
# Sub-key renames applied inside every resnet block.
lowerCAmelCase__ : str = [
    # (stable-diffusion, HF Diffusers)
    ('in_layers.0', 'norm1'),
    ('in_layers.2', 'conv1'),
    ('out_layers.0', 'norm2'),
    ('out_layers.3', 'conv2'),
    ('emb_layers.1', 'time_emb_proj'),
    ('skip_connection', 'conv_shortcut'),
]
lowerCAmelCase__ : int = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        lowerCAmelCase__ : Dict = F"""down_blocks.{i}.resnets.{j}."""
        lowerCAmelCase__ : Union[str, Any] = F"""input_blocks.{3*i + j + 1}.0."""
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
        if i < 3:
            # no attention layers in down_blocks.3
            lowerCAmelCase__ : Tuple = F"""down_blocks.{i}.attentions.{j}."""
            lowerCAmelCase__ : int = F"""input_blocks.{3*i + j + 1}.1."""
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
    for j in range(3):
        # loop over resnets/attentions for upblocks
        lowerCAmelCase__ : Tuple = F"""up_blocks.{i}.resnets.{j}."""
        lowerCAmelCase__ : int = F"""output_blocks.{3*i + j}.0."""
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
        if i > 0:
            # no attention layers in up_blocks.0
            lowerCAmelCase__ : Any = F"""up_blocks.{i}.attentions.{j}."""
            lowerCAmelCase__ : List[str] = F"""output_blocks.{3*i + j}.1."""
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
    if i < 3:
        # no downsample in down_blocks.3
        lowerCAmelCase__ : List[str] = F"""down_blocks.{i}.downsamplers.0.conv."""
        lowerCAmelCase__ : Union[str, Any] = F"""input_blocks.{3*(i+1)}.0.op."""
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
        # no upsample in up_blocks.3
        lowerCAmelCase__ : Dict = F"""up_blocks.{i}.upsamplers.0."""
        lowerCAmelCase__ : List[Any] = F"""output_blocks.{3*i + 2}.{1 if i == 0 else 2}."""
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
# Middle-block attention and resnet prefixes.
lowerCAmelCase__ : str = 'mid_block.attentions.0.'
lowerCAmelCase__ : Union[str, Any] = 'middle_block.1.'
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
    lowerCAmelCase__ : int = F"""mid_block.resnets.{j}."""
    lowerCAmelCase__ : List[str] = F"""middle_block.{2*j}."""
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def a_ ( unet_state_dict ):
    """Convert an HF Diffusers UNet state dict to the SD checkpoint key layout.

    Builds an HF-key -> SD-key mapping from the module-level conversion tables
    and returns a new dict keyed by the SD names.

    Fixes over the previous revision: the mapping entries were never written
    (assignment targets lost to mangling) and ``str.replace`` was called with
    the input dict as both arguments instead of the (hf_part, sd_part) pair.
    The parameter is renamed back to ``unet_state_dict`` to match the name the
    body already referenced.

    :param unet_state_dict: HF Diffusers UNet state dict.
    :return: new state dict with SD-style keys.
    """
    # buyer beware: this is a *brittle* function,
    # and correct output requires that all of these pieces interact in
    # the exact order in which I have arranged them.
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part , sd_part )
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part , sd_part )
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
# ================#
# VAE Conversion #
# ================#
# Static key renames between SD and HF Diffusers VAE checkpoints.
# NOTE(review): the bindings below were mangled; the append calls reference
# them as `vae_conversion_map` / `vae_conversion_map_attn`.
lowerCAmelCase__ : str = [
    # (stable-diffusion, HF Diffusers)
    ('nin_shortcut', 'conv_shortcut'),
    ('norm_out', 'conv_norm_out'),
    ('mid.attn_1.', 'mid_block.attentions.0.'),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        lowerCAmelCase__ : List[str] = F"""encoder.down_blocks.{i}.resnets.{j}."""
        lowerCAmelCase__ : List[Any] = F"""encoder.down.{i}.block.{j}."""
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
    if i < 3:
        lowerCAmelCase__ : Dict = F"""down_blocks.{i}.downsamplers.0."""
        lowerCAmelCase__ : str = F"""down.{i}.downsample."""
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
        lowerCAmelCase__ : int = F"""up_blocks.{i}.upsamplers.0."""
        lowerCAmelCase__ : str = F"""up.{3-i}.upsample."""
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        lowerCAmelCase__ : Dict = F"""decoder.up_blocks.{i}.resnets.{j}."""
        lowerCAmelCase__ : Optional[int] = F"""decoder.up.{3-i}.block.{j}."""
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    lowerCAmelCase__ : Any = F"""mid_block.resnets.{i}."""
    lowerCAmelCase__ : Any = F"""mid.block_{i+1}."""
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
# Renames applied only inside VAE attention blocks.
lowerCAmelCase__ : str = [
    # (stable-diffusion, HF Diffusers)
    ('norm.', 'group_norm.'),
    ('q.', 'query.'),
    ('k.', 'key.'),
    ('v.', 'value.'),
    ('proj_out.', 'proj_attn.'),
]
def a_ ( w ):
    """Reshape an HF linear weight into SD conv2d form by appending two unit dims.

    Fixes over the previous revision: the body referenced ``w`` while the
    parameter had been mangled to a different name (NameError); the parameter
    name the body used is restored.

    :param w: weight tensor of any shape.
    :return: the same data with shape ``(*w.shape, 1, 1)``.
    """
    # convert HF linear weights to SD conv2d weights
    return w.reshape(*w.shape , 1 , 1 )
def a_ ( vae_state_dict ):
    """Convert an HF Diffusers VAE state dict to the SD checkpoint key layout.

    Keys are renamed through the module-level conversion tables; the mid-block
    attention q/k/v/proj_out weights are additionally reshaped into conv2d
    form, matching the SD layout.

    Fixes over the previous revision: the mapping entries were never written
    (mangled assignment targets), ``str.replace`` was called with the input
    dict as both arguments, and the reshape helper was applied to the whole
    dict instead of the tensor value.  The parameter is renamed back to
    ``vae_state_dict`` to match the name the body already referenced.

    :param vae_state_dict: HF Diffusers VAE state dict.
    :return: new state dict with SD-style keys and reshaped attention weights.
    """
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part , sd_part )
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part , sd_part )
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ['q', 'k', 'v', 'proj_out']
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f'''mid.attn_1.{weight_name}.weight''' in k:
                print(f'''Reshaping {k} for SD format''' )
                new_state_dict[k] = reshape_weight_for_sd(v )
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
# Key renames between SD (OpenCLIP-style) and HF text-encoder checkpoints.
# NOTE(review): the bindings below were mangled; downstream code references
# them as `textenc_conversion_lst`, `protected`, `textenc_pattern`, and the
# q/k/v index table (presumably `code2idx`).
lowerCAmelCase__ : Optional[int] = [
    # (stable-diffusion, HF Diffusers)
    ('resblocks.', 'text_model.encoder.layers.'),
    ('ln_1', 'layer_norm1'),
    ('ln_2', 'layer_norm2'),
    ('.c_fc.', '.fc1.'),
    ('.c_proj.', '.fc2.'),
    ('.attn', '.self_attn'),
    ('ln_final.', 'transformer.text_model.final_layer_norm.'),
    ('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'),
    ('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight'),
]
# Escaped-HF-key -> SD-key map, and a single alternation regex matching any HF key.
lowerCAmelCase__ : List[Any] = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
lowerCAmelCase__ : int = re.compile('|'.join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
lowerCAmelCase__ : Optional[int] = {'q': 0, 'k': 1, 'v': 2}
def a_ ( text_enc_dict ):
    """Convert an HF v2 (OpenCLIP-style) text-encoder state dict to SD format.

    Per attention block, the separate q/k/v projection weights and biases are
    captured and concatenated into the fused ``in_proj_weight`` /
    ``in_proj_bias`` tensors of the SD layout; every other key is relabelled
    through ``textenc_pattern`` / ``protected``.

    Fixes over the previous revision: the capture-dict writes and relabelled
    keys were lost to mangling and the regex-replacement lambdas bound the
    wrong name (``m`` was unbound).  The parameter is renamed back to
    ``text_enc_dict`` to match the name the body already referenced.
    NOTE(review): the q/k/v slot lookup assumes the module-level
    ``{'q': 0, 'k': 1, 'v': 2}`` table is named ``code2idx`` — restore that
    binding alongside this function.

    :param text_enc_dict: HF-style text-encoder state dict.
    :return: state dict with SD-style keys and fused in_proj tensors.
    :raises Exception: if any q/k/v member of an attention block is missing.
    """
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith('.self_attn.q_proj.weight' )
            or k.endswith('.self_attn.k_proj.weight' )
            or k.endswith('.self_attn.v_proj.weight' )
        ):
            k_pre = k[: -len('.q_proj.weight' )]
            # Single character 'q' / 'k' / 'v' identifying the projection.
            k_code = k[-len('q_proj.weight' )]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue
        if (
            k.endswith('.self_attn.q_proj.bias' )
            or k.endswith('.self_attn.k_proj.bias' )
            or k.endswith('.self_attn.v_proj.bias' )
        ):
            k_pre = k[: -len('.q_proj.bias' )]
            k_code = k[-len('q_proj.bias' )]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue
        relabelled_key = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , k )
        new_state_dict[relabelled_key] = v
    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing' )
        relabelled_key = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , k_pre )
        new_state_dict[relabelled_key + '.in_proj_weight'] = torch.cat(tensors )
    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing' )
        relabelled_key = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , k_pre )
        new_state_dict[relabelled_key + '.in_proj_bias'] = torch.cat(tensors )
    return new_state_dict
def a_ ( text_enc_dict ):
    """Identity conversion: SD-v1 CLIP text-encoder keys need no remapping.

    Fixes over the previous revision: the body returned ``text_enc_dict``
    while the parameter had been mangled to a different name (NameError).

    :param text_enc_dict: text-encoder state dict.
    :return: the same dict, unchanged.
    """
    return text_enc_dict
if __name__ == "__main__":
    # CLI driver: load a Diffusers model directory (safetensors preferred,
    # torch .bin fallback), convert UNet / VAE / text encoder to SD layout,
    # and save the merged checkpoint.
    # NOTE(review): every assignment target below was mangled to
    # `lowerCAmelCase__`, so the names read later (`parser`, `args`,
    # `unet_path`, `state_dict`, ...) are unbound as written; the original
    # binding names must be restored for this script to run.
    lowerCAmelCase__ : Tuple = argparse.ArgumentParser()
    parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
    parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
    parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
    parser.add_argument(
        '--use_safetensors', action='store_true', help='Save weights use safetensors, default is ckpt.'
    )
    lowerCAmelCase__ : Optional[int] = parser.parse_args()
    assert args.model_path is not None, "Must provide a model path!"
    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
    # Path for safetensors
    lowerCAmelCase__ : Tuple = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.safetensors')
    lowerCAmelCase__ : List[str] = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.safetensors')
    lowerCAmelCase__ : int = osp.join(args.model_path, 'text_encoder', 'model.safetensors')
    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        lowerCAmelCase__ : Union[str, Any] = load_file(unet_path, device='cpu')
    else:
        lowerCAmelCase__ : str = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.bin')
        lowerCAmelCase__ : Dict = torch.load(unet_path, map_location='cpu')
    if osp.exists(vae_path):
        lowerCAmelCase__ : Optional[Any] = load_file(vae_path, device='cpu')
    else:
        lowerCAmelCase__ : Optional[int] = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.bin')
        lowerCAmelCase__ : List[str] = torch.load(vae_path, map_location='cpu')
    if osp.exists(text_enc_path):
        lowerCAmelCase__ : Tuple = load_file(text_enc_path, device='cpu')
    else:
        lowerCAmelCase__ : Any = osp.join(args.model_path, 'text_encoder', 'pytorch_model.bin')
        lowerCAmelCase__ : Any = torch.load(text_enc_path, map_location='cpu')
    # Convert the UNet model
    lowerCAmelCase__ : Any = convert_unet_state_dict(unet_state_dict)
    lowerCAmelCase__ : Dict = {'model.diffusion_model.' + k: v for k, v in unet_state_dict.items()}
    # Convert the VAE model
    lowerCAmelCase__ : List[Any] = convert_vae_state_dict(vae_state_dict)
    lowerCAmelCase__ : str = {'first_stage_model.' + k: v for k, v in vae_state_dict.items()}
    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    lowerCAmelCase__ : List[Any] = 'text_model.encoder.layers.22.layer_norm2.bias' in text_enc_dict
    if is_vaa_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        lowerCAmelCase__ : Tuple = {'transformer.' + k: v for k, v in text_enc_dict.items()}
        lowerCAmelCase__ : List[str] = convert_text_enc_state_dict_vaa(text_enc_dict)
        lowerCAmelCase__ : str = {'cond_stage_model.model.' + k: v for k, v in text_enc_dict.items()}
    else:
        lowerCAmelCase__ : Optional[Any] = convert_text_enc_state_dict(text_enc_dict)
        lowerCAmelCase__ : Optional[Any] = {'cond_stage_model.transformer.' + k: v for k, v in text_enc_dict.items()}
    # Put together new checkpoint
    lowerCAmelCase__ : List[Any] = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        lowerCAmelCase__ : int = {k: v.half() for k, v in state_dict.items()}
    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        # NOTE(review): the wrapped `{'state_dict': ...}` dict is bound to a
        # mangled name, so the unwrapped dict is saved below — presumably the
        # wrapper was meant to be saved; confirm upstream.
        lowerCAmelCase__ : List[str] = {'state_dict': state_dict}
        torch.save(state_dict, args.checkpoint_path)
| 98 | 1 |
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class snake_case :
    """Helper that builds small SwiftFormer configs/inputs for the model tests.

    NOTE(review): all helper methods share the mangled name `__lowerCAmelCase`
    (only the last survives at runtime) and most local/attribute bindings were
    lost to mangling; the original names must be restored for this tester to
    function.
    """
    def __init__( self : List[Any] ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : Optional[Any]=13 ,lowerCamelCase__ : Optional[Any]=3 ,lowerCamelCase__ : int=True ,lowerCamelCase__ : Union[str, Any]=True ,lowerCamelCase__ : Union[str, Any]=0.1 ,lowerCamelCase__ : List[Any]=0.1 ,lowerCamelCase__ : Dict=224 ,lowerCamelCase__ : List[str]=1_000 ,lowerCamelCase__ : List[Any]=[3, 3, 6, 4] ,lowerCamelCase__ : int=[48, 56, 112, 220] ,):
        UpperCAmelCase__ = parent
        UpperCAmelCase__ = batch_size
        UpperCAmelCase__ = num_channels
        UpperCAmelCase__ = is_training
        UpperCAmelCase__ = use_labels
        UpperCAmelCase__ = hidden_dropout_prob
        UpperCAmelCase__ = attention_probs_dropout_prob
        UpperCAmelCase__ = num_labels
        UpperCAmelCase__ = image_size
        UpperCAmelCase__ = layer_depths
        UpperCAmelCase__ = embed_dims
    # Presumably the original `prepare_config_and_inputs`.
    def __lowerCAmelCase ( self : Optional[Any] ):
        UpperCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        UpperCAmelCase__ = None
        if self.use_labels:
            UpperCAmelCase__ = ids_tensor([self.batch_size] ,self.num_labels )
        UpperCAmelCase__ = self.get_config()
        return config, pixel_values, labels
    # Presumably the original `get_config`.
    def __lowerCAmelCase ( self : Optional[Any] ):
        return SwiftFormerConfig(
            depths=self.layer_depths ,embed_dims=self.embed_dims ,mlp_ratio=4 ,downsamples=[True, True, True, True] ,hidden_act='gelu' ,num_labels=self.num_labels ,down_patch_size=3 ,down_stride=2 ,down_pad=1 ,drop_rate=0.0 ,drop_path_rate=0.0 ,use_layer_scale=lowerCamelCase__ ,layer_scale_init_value=1e-5 ,)
    # Presumably the original `create_and_check_model`.
    def __lowerCAmelCase ( self : Optional[int] ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : Optional[Any] ):
        UpperCAmelCase__ = SwiftFormerModel(config=lowerCamelCase__ )
        model.to(lowerCamelCase__ )
        model.eval()
        UpperCAmelCase__ = model(lowerCamelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.embed_dims[-1], 7, 7) )
    # Presumably the original `create_and_check_for_image_classification`.
    def __lowerCAmelCase ( self : Optional[int] ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : str ,lowerCamelCase__ : Dict ):
        UpperCAmelCase__ = self.num_labels
        UpperCAmelCase__ = SwiftFormerForImageClassification(lowerCamelCase__ )
        model.to(lowerCamelCase__ )
        model.eval()
        UpperCAmelCase__ = model(lowerCamelCase__ ,labels=lowerCamelCase__ )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
        UpperCAmelCase__ = SwiftFormerForImageClassification(lowerCamelCase__ )
        model.to(lowerCamelCase__ )
        model.eval()
        UpperCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        UpperCAmelCase__ = model(lowerCamelCase__ )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
    # Presumably the original `prepare_config_and_inputs_for_common`.
    def __lowerCAmelCase ( self : Union[str, Any] ):
        ((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) = self.prepare_config_and_inputs()
        UpperCAmelCase__ = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class snake_case ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
    """Common-model-test suite for SwiftFormer (model + image classification).

    NOTE(review): all test methods share the mangled name `__lowerCAmelCase`
    (only the last survives at runtime) and local bindings were lost to
    mangling; the original test-method names must be restored for this suite
    to be collected and to run.
    """
    snake_case__ = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    snake_case__ = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )
    snake_case__ = False
    snake_case__ = False
    snake_case__ = False
    snake_case__ = False
    snake_case__ = False
    # Presumably the original `setUp`.
    def __lowerCAmelCase ( self : Tuple ):
        UpperCAmelCase__ = SwiftFormerModelTester(self )
        UpperCAmelCase__ = ConfigTester(
            self ,config_class=lowerCamelCase__ ,has_text_modality=lowerCamelCase__ ,hidden_size=37 ,num_attention_heads=12 ,num_hidden_layers=12 ,)
    def __lowerCAmelCase ( self : int ):
        self.config_tester.run_common_tests()
    @unittest.skip(reason='SwiftFormer does not use inputs_embeds' )
    def __lowerCAmelCase ( self : Optional[Any] ):
        pass
    # Checks get_output_embeddings is absent or a Linear head.
    def __lowerCAmelCase ( self : int ):
        UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase__ = model_class(lowerCamelCase__ )
            UpperCAmelCase__ = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(lowerCamelCase__ ,nn.Linear ) )
    # Checks forward() takes `pixel_values` as its first argument.
    def __lowerCAmelCase ( self : Union[str, Any] ):
        UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase__ = model_class(lowerCamelCase__ )
            UpperCAmelCase__ = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCAmelCase__ = [*signature.parameters.keys()]
            UpperCAmelCase__ = ['pixel_values']
            self.assertListEqual(arg_names[:1] ,lowerCamelCase__ )
    def __lowerCAmelCase ( self : Optional[int] ):
        UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCamelCase__ )
    def __lowerCAmelCase ( self : Dict ):
        UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
    @slow
    def __lowerCAmelCase ( self : str ):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase__ = SwiftFormerModel.from_pretrained(lowerCamelCase__ )
            self.assertIsNotNone(lowerCamelCase__ )
    @unittest.skip(reason='SwiftFormer does not output attentions' )
    def __lowerCAmelCase ( self : Tuple ):
        pass
    # Checks the hidden-state outputs (count and per-stage shapes).
    def __lowerCAmelCase ( self : str ):
        def check_hidden_states_output(lowerCamelCase__ : str ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : str ):
            UpperCAmelCase__ = model_class(lowerCamelCase__ )
            model.to(lowerCamelCase__ )
            model.eval()
            with torch.no_grad():
                UpperCAmelCase__ = model(**self._prepare_for_class(lowerCamelCase__ ,lowerCamelCase__ ) )
            UpperCAmelCase__ = outputs.hidden_states
            UpperCAmelCase__ = 8
            self.assertEqual(len(lowerCamelCase__ ) ,lowerCamelCase__ ) # TODO
            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(lowerCamelCase__ ) ):
                self.assertEqual(
                    hidden_states[i].shape ,torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ] ) ,)
        UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase__ = True
            check_hidden_states_output(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            UpperCAmelCase__ = True
            check_hidden_states_output(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
    # Checks parameter initialization with ranges/scales zeroed out.
    def __lowerCAmelCase ( self : str ):
        def _config_zero_init(lowerCamelCase__ : Dict ):
            UpperCAmelCase__ = copy.deepcopy(lowerCamelCase__ )
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(lowerCamelCase__ ,lowerCamelCase__ ,1e-10 )
                if isinstance(getattr(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ) ,lowerCamelCase__ ):
                    UpperCAmelCase__ = _config_zero_init(getattr(lowerCamelCase__ ,lowerCamelCase__ ) )
                    setattr(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
            return configs_no_init
        UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCAmelCase__ = _config_zero_init(lowerCamelCase__ )
        for model_class in self.all_model_classes:
            UpperCAmelCase__ = model_class(config=lowerCamelCase__ )
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item() ,[0.0, 1.0] ,msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' ,)
    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def __lowerCAmelCase ( self : Dict ):
        pass
def a_ ( ):
    """Load the COCO cats fixture image used by the integration tests below."""
    return Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
@require_torch
@require_vision
class snake_case ( unittest.TestCase ):
    """Integration tests against the MBZUAI/swiftformer-xs checkpoint."""

    @cached_property
    def __lowerCAmelCase ( self : str ):
        """Image processor for the checkpoint, or None when vision deps are missing."""
        if is_vision_available():
            return ViTImageProcessor.from_pretrained('MBZUAI/swiftformer-xs' )
        return None

    @slow
    def __lowerCAmelCase ( self : Optional[int] ):
        """Forward the fixture image and pin the logits shape and first three values.

        NOTE(review): `lowerCamelCase__` below is unresolved at this scope —
        presumably the mangled `torch_device`; confirm against the original test.
        """
        model = SwiftFormerForImageClassification.from_pretrained('MBZUAI/swiftformer-xs' ).to(lowerCamelCase__ )
        processor = self.default_image_processor
        image = prepare_img()
        inputs = processor(images=image ,return_tensors='pt' ).to(lowerCamelCase__ )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape ,expected_shape )
        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]] ).to(lowerCamelCase__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] ,expected_slice ,atol=1e-4 ) )
| 98 | """simple docstring"""
def a_ ( a , b ):
    """Return the bitwise AND of two non-negative integers as a zero-padded binary string.

    Both operands are rendered in binary, left-padded to the wider width, and
    AND-ed digit by digit; the result keeps that width and a '0b' prefix.

    Raises:
        ValueError: if either input is negative.

    >>> a_(25, 32)
    '0b000000'
    >>> a_(37, 50)
    '0b100000'
    """
    # BUGFIX: the signature previously declared the same parameter name twice
    # (a SyntaxError); the body always referenced `a` and `b`.
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive' )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a == '1' and char_b == '1' ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed as a script.
    import doctest

    doctest.testmod()
| 98 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase__ : List[str] = logging.get_logger(__name__)
class snake_case ( BaseImageProcessor ):
    """ConvNext-style image processor: resize (with a "crop pct" shortcut below
    384 px), rescale and normalize images, returning ``pixel_values``.

    BUGFIX: every method signature previously repeated the same parameter name
    (a SyntaxError) while the bodies referenced the real names (``size``,
    ``crop_pct``, ...); all four transform methods also shared one mangled name
    so ``preprocess``'s calls to ``self.resize``/``self.rescale``/``self.normalize``
    could not resolve. Parameter and method names are restored to match the
    bodies and internal call sites. The base class is restored to
    ``BaseImageProcessor`` (imported above), whose API these methods override.
    """

    snake_case__ = ["pixel_values"]

    def __init__( self : List[Any] ,do_resize : bool = True ,size : Dict[str, int] = None ,crop_pct : float = None ,resample : PILImageResampling = PILImageResampling.BILINEAR ,do_rescale : bool = True ,rescale_factor : Union[int, float] = 1 / 255 ,do_normalize : bool = True ,image_mean : Optional[Union[float, List[float]]] = None ,image_std : Optional[Union[float, List[float]]] = None ,**kwargs : Union[str, Any] ,):
        """Store the default preprocessing configuration."""
        super().__init__(**kwargs )
        size = size if size is not None else {'shortest_edge': 384}
        size = get_size_dict(size ,default_to_square=False )
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize( self : Dict ,image : np.ndarray ,size : Dict[str, int] ,crop_pct : float ,resample : PILImageResampling = PILImageResampling.BICUBIC ,data_format : Optional[Union[str, ChannelDimension]] = None ,**kwargs : List[Any] ,):
        """Resize following the crop-pct recipe: below 384 px, resize the shortest
        edge to shortest_edge/crop_pct then center-crop; at 384+ warp directly to
        (shortest_edge, shortest_edge)."""
        size = get_size_dict(size ,default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(f'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' )
        shortest_edge = size['shortest_edge']
        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct )
            resize_size = get_resize_output_image_size(image ,size=resize_shortest_edge ,default_to_square=False )
            # `resize` here is the functional helper imported from image_transforms.
            image = resize(image=image ,size=resize_size ,resample=resample ,data_format=data_format ,**kwargs )
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image ,size=(shortest_edge, shortest_edge) ,data_format=data_format ,**kwargs )
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image ,size=(shortest_edge, shortest_edge) ,resample=resample ,data_format=data_format ,**kwargs )

    def rescale( self : Union[str, Any] ,image : np.ndarray ,scale : Union[int, float] ,data_format : Optional[Union[str, ChannelDimension]] = None ,**kwargs : str ,):
        """Multiply pixel values by ``scale`` (typically 1/255)."""
        return rescale(image ,scale=scale ,data_format=data_format ,**kwargs )

    def normalize( self : Dict ,image : np.ndarray ,mean : Union[float, List[float]] ,std : Union[float, List[float]] ,data_format : Optional[Union[str, ChannelDimension]] = None ,**kwargs : List[str] ,):
        """Normalize with per-channel ``mean`` and ``std``."""
        return normalize(image ,mean=mean ,std=std ,data_format=data_format ,**kwargs )

    def preprocess( self : Optional[int] ,images : ImageInput ,do_resize : bool = None ,size : Dict[str, int] = None ,crop_pct : float = None ,resample : PILImageResampling = None ,do_rescale : bool = None ,rescale_factor : float = None ,do_normalize : bool = None ,image_mean : Optional[Union[float, List[float]]] = None ,image_std : Optional[Union[float, List[float]]] = None ,return_tensors : Optional[Union[str, TensorType]] = None ,data_format : ChannelDimension = ChannelDimension.FIRST ,**kwargs : Any ,):
        """Apply the configured resize/rescale/normalize pipeline to one or more
        images and return a BatchFeature with ``pixel_values``."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size ,default_to_square=False )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        # BUGFIX: previously parsed as `(do_resize and size is None) or resample is None`,
        # which could raise even when do_resize is False.
        if do_resize and (size is None or resample is None):
            raise ValueError('Size and resample must be specified if do_resize is True.' )
        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError('crop_pct must be specified if size < 384.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image ,size=size ,crop_pct=crop_pct ,resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image ,scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image ,mean=image_mean ,std=image_std ) for image in images]
        images = [to_channel_dimension_format(image ,data_format ) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data ,tensor_type=return_tensors )
| 98 | """simple docstring"""
import requests
# BUGFIX: the package is `bs4` (Beautiful Soup 4); `bsa` does not exist on PyPI.
from bs4 import BeautifulSoup
def a_ ( base_url , params ):
    """Return the "Cited by N" text for a Google Scholar lookup query.

    BUGFIX: the signature previously declared the same parameter name twice
    (a SyntaxError); the body needs a URL plus a query-params mapping.

    Args:
        base_url: the Scholar lookup endpoint.
        params: query parameters (title, journal, year, ...).
    """
    soup = BeautifulSoup(requests.get(base_url , params=params ).content , 'html.parser' )
    result_div = soup.find('div' , attrs={'class': 'gs_ri'} )
    # The third anchor of the footer row is the "Cited by N" link.
    anchors = result_div.find('div' , attrs={'class': 'gs_fl'} ).find_all('a' )
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
        'title': (
            'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
            'capacitance, volumetric capacitance, and energy density'
        ),
        'journal': 'Chem. Mater.',
        'volume': 30,
        'pages': '3979-3990',
        'year': 2_018,
        'hl': 'en',
    }
    # BUGFIX: the previous call referenced undefined names `get_citation` and
    # `params` (the dict was bound to a mangled variable); the lookup helper in
    # this module is `a_`.
    print(a_('https://scholar.google.com/scholar_lookup', params=params))
| 98 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase__ : Any = {
'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'],
'processing_speech_to_text': ['Speech2TextProcessor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Any = ['Speech2TextTokenizer']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Union[str, Any] = ['Speech2TextFeatureExtractor']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : List[str] = [
'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSpeech2TextForConditionalGeneration',
'TFSpeech2TextModel',
'TFSpeech2TextPreTrainedModel',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : int = [
'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Speech2TextForConditionalGeneration',
'Speech2TextModel',
'Speech2TextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98 | """simple docstring"""
def is_palindrome ( lowerCamelCase ):
    """Return True if the decimal representation of the number is a palindrome."""
    return str(lowerCamelCase ) == str(lowerCamelCase )[::-1]


def sum_reverse ( lowerCamelCase ):
    """Return the number plus its digit-reversed counterpart."""
    return int(lowerCamelCase ) + int(str(lowerCamelCase )[::-1] )


def a_ ( lowerCamelCase = 1_0_0_0_0 ):
    """Project Euler 55: count Lychrel candidates below ``limit``.

    A number is assumed Lychrel if 50 reverse-and-add iterations never
    yield a palindrome.

    BUGFIX: all three functions here were renamed to ``a_`` (each shadowing
    the previous), while the call sites still referenced ``sum_reverse``,
    ``is_palindrome`` and ``solution`` — every call raised NameError. The two
    helpers get their referenced names back; the entry point keeps the
    module's final binding ``a_``.
    """
    lychrel_nums = []
    for num in range(1 , lowerCamelCase ):
        iterations = 0
        candidate = num
        while iterations < 5_0:
            candidate = sum_reverse(candidate )
            iterations += 1
            if is_palindrome(candidate ):
                break
        else:
            # 50 iterations without a palindrome: assume Lychrel.
            lychrel_nums.append(num )
    return len(lychrel_nums )


if __name__ == "__main__":
    print(F"""{a_() = }""")
| 98 | 1 |
"""simple docstring"""
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class snake_case ( __UpperCAmelCase ):
    """Descriptor that caches the wrapped getter's result on the instance.

    NOTE(review): the base name `__UpperCAmelCase` is unresolved in this file;
    from the descriptor protocol and `self.fget` usage it is presumably
    ``property`` — confirm against the original module.
    """

    def __get__( self : str ,lowerCamelCase__ : Any ,lowerCamelCase__ : List[str]=None ):
        # See docs.python.org/3/howto/descriptor.html#properties
        # NOTE(review): `obj` and `cached` below are read but only
        # `UpperCAmelCase__` is assigned — the local names look machine-mangled
        # (original presumably bound the owner instance and the cached value).
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError('unreadable attribute' )
        UpperCAmelCase__ = '__cached_' + self.fget.__name__
        UpperCAmelCase__ = getattr(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
        if cached is None:
            # Compute once and memoize on the instance under the cache attribute.
            UpperCAmelCase__ = self.fget(lowerCamelCase__ )
            setattr(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
        return cached
def a_ ( lowerCamelCase ):
    """Convert a truthy/falsy string to 1/0 (distutils.strtobool semantics).

    Accepts y/yes/t/true/on/1 and n/no/f/false/off/0, case-insensitively;
    anything else raises ValueError.
    """
    val = lowerCamelCase.lower()
    truthy = {"y", "yes", "t", "true", "on", "1"}
    falsy = {"n", "no", "f", "false", "off", "0"}
    if val in truthy:
        return 1
    if val in falsy:
        return 0
    raise ValueError(f'''invalid truth value {val!r}''' )
def a_ ( lowerCamelCase ):
    """Return True if the object is a tensor of any supported framework
    (torch.fx proxy, torch, TF, JAX array/tracer) or a numpy array.

    Framework imports are done lazily so missing optional deps don't break
    module import.
    """
    if is_torch_fx_proxy(lowerCamelCase ):
        return True
    if is_torch_available():
        import torch

        if isinstance(lowerCamelCase , torch.Tensor ):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(lowerCamelCase , tf.Tensor ):
            return True
    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(lowerCamelCase , (jnp.ndarray, Tracer) ):
            return True
    # Fall back to numpy, which is a hard dependency.
    return isinstance(lowerCamelCase , np.ndarray )
# NOTE(review): the functions below are (private impl, public wrapper) pairs;
# every def has been renamed to `a_`, so each shadows the previous one and the
# `_is_*` helper names the wrappers call are unresolved in this file — confirm
# against the original module before use.
def a_ ( lowerCamelCase ):
    """True if the object is a numpy.ndarray (private impl)."""
    return isinstance(lowerCamelCase , np.ndarray )

def a_ ( lowerCamelCase ):
    """Public wrapper around the numpy predicate."""
    return _is_numpy(lowerCamelCase )

def a_ ( lowerCamelCase ):
    """True if the object is a torch.Tensor (torch imported lazily)."""
    import torch

    return isinstance(lowerCamelCase , torch.Tensor )

def a_ ( lowerCamelCase ):
    """Safe wrapper: False when torch is unavailable."""
    return False if not is_torch_available() else _is_torch(lowerCamelCase )

def a_ ( lowerCamelCase ):
    """True if the object is a torch.device."""
    import torch

    return isinstance(lowerCamelCase , torch.device )

def a_ ( lowerCamelCase ):
    """Safe wrapper: False when torch is unavailable."""
    return False if not is_torch_available() else _is_torch_device(lowerCamelCase )

def a_ ( lowerCamelCase ):
    """True if the object is (or, when given a string, names) a torch.dtype.

    NOTE(review): the isinstance/hasattr arguments here are all the mangled
    parameter name — the original presumably checked ``isinstance(x, str)``
    and looked the name up on ``torch``; confirm.
    """
    import torch

    if isinstance(lowerCamelCase , lowerCamelCase ):
        if hasattr(lowerCamelCase , lowerCamelCase ):
            UpperCAmelCase__ = getattr(lowerCamelCase , lowerCamelCase )
        else:
            return False
    return isinstance(lowerCamelCase , torch.dtype )

def a_ ( lowerCamelCase ):
    """Safe wrapper: False when torch is unavailable."""
    return False if not is_torch_available() else _is_torch_dtype(lowerCamelCase )

def a_ ( lowerCamelCase ):
    """True if the object is a tf.Tensor."""
    import tensorflow as tf

    return isinstance(lowerCamelCase , tf.Tensor )

def a_ ( lowerCamelCase ):
    """Safe wrapper: False when TensorFlow is unavailable."""
    return False if not is_tf_available() else _is_tensorflow(lowerCamelCase )

def a_ ( lowerCamelCase ):
    """True for symbolic TF tensors; exact-type fallback before TF 2.14."""
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(lowerCamelCase , 'is_symbolic_tensor' ):
        return tf.is_symbolic_tensor(lowerCamelCase )
    return type(lowerCamelCase ) == tf.Tensor

def a_ ( lowerCamelCase ):
    """Safe wrapper: False when TensorFlow is unavailable."""
    return False if not is_tf_available() else _is_tf_symbolic_tensor(lowerCamelCase )

def a_ ( lowerCamelCase ):
    """True if the object is a JAX ndarray."""
    import jax.numpy as jnp  # noqa: F811

    return isinstance(lowerCamelCase , jnp.ndarray )

def a_ ( lowerCamelCase ):
    """Safe wrapper: False when flax/jax is unavailable."""
    return False if not is_flax_available() else _is_jax(lowerCamelCase )
def a_ ( lowerCamelCase ):
    """Recursively convert tensors/arrays inside dicts, lists and tuples into
    plain Python objects (lists and scalars).

    NOTE(review): the recursive calls reference `to_py_obj` and the dict branch
    reads `obj`, both unresolved here (this def itself was renamed `a_`) —
    machine mangling; confirm against the original module.
    """
    if isinstance(lowerCamelCase , (dict, UserDict) ):
        return {k: to_py_obj(lowerCamelCase ) for k, v in obj.items()}
    elif isinstance(lowerCamelCase , (list, tuple) ):
        return [to_py_obj(lowerCamelCase ) for o in obj]
    elif is_tf_tensor(lowerCamelCase ):
        return obj.numpy().tolist()
    elif is_torch_tensor(lowerCamelCase ):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(lowerCamelCase ):
        return np.asarray(lowerCamelCase ).tolist()
    elif isinstance(lowerCamelCase , (np.ndarray, np.number) ):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj

def a_ ( lowerCamelCase ):
    """Recursively convert tensors (inside dicts/lists/tuples) to numpy arrays.

    NOTE(review): same mangling caveat as above — `to_numpy` and `obj` are
    unresolved names.
    """
    if isinstance(lowerCamelCase , (dict, UserDict) ):
        return {k: to_numpy(lowerCamelCase ) for k, v in obj.items()}
    elif isinstance(lowerCamelCase , (list, tuple) ):
        return np.array(lowerCamelCase )
    elif is_tf_tensor(lowerCamelCase ):
        return obj.numpy()
    elif is_torch_tensor(lowerCamelCase ):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(lowerCamelCase ):
        return np.asarray(lowerCamelCase )
    else:
        return obj
class snake_case ( __UpperCAmelCase ):
    """Ordered, dict-like container for model outputs: fields are reachable by
    attribute, string key, or integer index, and ``None`` fields are skipped.

    NOTE(review): the base `__UpperCAmelCase` is unresolved here — from the
    dict-style API used below (`items`, `keys`, `__setitem__`) it is presumably
    ``collections.OrderedDict``; confirm against the original module. Several
    locals in `__post_init__`-style code below (`class_fields`, `first_field`,
    `v`, ...) are read but only `UpperCAmelCase__` is assigned — machine
    mangling.
    """

    def __lowerCAmelCase ( self : List[Any] ):
        """Validate the dataclass fields and register all non-None values as items."""
        UpperCAmelCase__ = fields(self )
        # Safety and consistency checks
        if not len(lowerCamelCase__ ):
            raise ValueError(f'''{self.__class__.__name__} has no fields.''' )
        if not all(field.default is None for field in class_fields[1:] ):
            raise ValueError(f'''{self.__class__.__name__} should not have more than one required field.''' )
        UpperCAmelCase__ = getattr(self ,class_fields[0].name )
        UpperCAmelCase__ = all(getattr(self ,field.name ) is None for field in class_fields[1:] )
        # If only the first field is set and it isn't a tensor, it may itself be
        # a mapping or an iterator of (key, value) pairs to expand.
        if other_fields_are_none and not is_tensor(lowerCamelCase__ ):
            if isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
                UpperCAmelCase__ = first_field.items()
                UpperCAmelCase__ = True
            else:
                try:
                    UpperCAmelCase__ = iter(lowerCamelCase__ )
                    UpperCAmelCase__ = True
                except TypeError:
                    UpperCAmelCase__ = False
            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(lowerCamelCase__ ):
                    if (
                        not isinstance(lowerCamelCase__ ,(list, tuple) )
                        or not len(lowerCamelCase__ ) == 2
                        or not isinstance(element[0] ,lowerCamelCase__ )
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            UpperCAmelCase__ = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f'''Cannot set key/value for {element}. It needs to be a tuple (key, value).''' )
                        break
                    setattr(self ,element[0] ,element[1] )
                    if element[1] is not None:
                        UpperCAmelCase__ = element[1]
            elif first_field is not None:
                UpperCAmelCase__ = first_field
        else:
            # Register every non-None declared field as an item.
            for field in class_fields:
                UpperCAmelCase__ = getattr(self ,field.name )
                if v is not None:
                    UpperCAmelCase__ = v

    def __delitem__( self : Union[str, Any] ,*lowerCamelCase__ : List[str] ,**lowerCamelCase__ : Dict ):
        """Deletion is forbidden: outputs are immutable in shape."""
        raise Exception(f'''You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.''' )

    def __lowerCAmelCase ( self : Optional[int] ,*lowerCamelCase__ : List[str] ,**lowerCamelCase__ : str ):
        """``setdefault`` is forbidden (see __delitem__)."""
        raise Exception(f'''You cannot use ``setdefault`` on a {self.__class__.__name__} instance.''' )

    def __lowerCAmelCase ( self : str ,*lowerCamelCase__ : Any ,**lowerCamelCase__ : Union[str, Any] ):
        """``pop`` is forbidden (see __delitem__)."""
        raise Exception(f'''You cannot use ``pop`` on a {self.__class__.__name__} instance.''' )

    def __lowerCAmelCase ( self : str ,*lowerCamelCase__ : Optional[Any] ,**lowerCamelCase__ : List[Any] ):
        """``update`` is forbidden (see __delitem__)."""
        raise Exception(f'''You cannot use ``update`` on a {self.__class__.__name__} instance.''' )

    def __getitem__( self : List[str] ,lowerCamelCase__ : List[Any] ):
        """String keys behave like dict access; integer/slice keys index the value tuple."""
        if isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
            UpperCAmelCase__ = dict(self.items() )
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__( self : Optional[int] ,lowerCamelCase__ : str ,lowerCamelCase__ : Optional[int] ):
        """Keep attribute writes and item writes in sync for known keys."""
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(lowerCamelCase__ ,lowerCamelCase__ )
        super().__setattr__(lowerCamelCase__ ,lowerCamelCase__ )

    def __setitem__( self : Optional[Any] ,lowerCamelCase__ : str ,lowerCamelCase__ : Dict ):
        """Item writes also set the matching attribute."""
        # Will raise a KeyException if needed
        super().__setitem__(lowerCamelCase__ ,lowerCamelCase__ )
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(lowerCamelCase__ ,lowerCamelCase__ )

    def __lowerCAmelCase ( self : Optional[int] ):
        """Return all registered values as a tuple (used by integer indexing)."""
        return tuple(self[k] for k in self.keys() )
class snake_case ( __UpperCAmelCase , __UpperCAmelCase ):
    """Enum base that raises a friendlier error listing the valid choices.

    NOTE(review): both bases `__UpperCAmelCase` are unresolved — presumably
    ``(str, enum.Enum)``; `_valueamember_map_` and `value` below look like
    mangled `_value2member_map_` and the missing-value argument. Confirm
    against the original module.
    """

    @classmethod
    def __lowerCAmelCase ( cls : List[str] ,lowerCamelCase__ : List[Any] ):
        # Invoked when a value lookup fails: surface the valid members.
        raise ValueError(
            f'''{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}''' )

class snake_case ( __UpperCAmelCase ):
    """Valid values for tokenizer ``padding`` arguments.

    NOTE(review): all members were renamed to `snake_case__` so only the last
    assignment survives — mangling; the values are the meaningful part.
    """

    snake_case__ = "longest"
    snake_case__ = "max_length"
    snake_case__ = "do_not_pad"

class snake_case ( __UpperCAmelCase ):
    """Valid values for ``return_tensors`` arguments (same member-name caveat)."""

    snake_case__ = "pt"
    snake_case__ = "tf"
    snake_case__ = "np"
    snake_case__ = "jax"
class snake_case :
"""simple docstring"""
def __init__( self : Tuple ,lowerCamelCase__ : List[ContextManager] ):
UpperCAmelCase__ = context_managers
UpperCAmelCase__ = ExitStack()
def __enter__( self : Union[str, Any] ):
for context_manager in self.context_managers:
self.stack.enter_context(lowerCamelCase__ )
def __exit__( self : Union[str, Any] ,*lowerCamelCase__ : Optional[Any] ,**lowerCamelCase__ : Dict ):
self.stack.__exit__(*lowerCamelCase__ ,**lowerCamelCase__ )
def a_ ( lowerCamelCase ):
    """Return True if the model class's call signature defaults ``return_loss``
    to True.

    NOTE(review): `infer_framework` is unresolved in this file, and
    `framework`/`signature`/`model_class` are read while only `UpperCAmelCase__`
    is assigned — machine mangling; confirm against the original module.
    """
    UpperCAmelCase__ = infer_framework(lowerCamelCase )
    if framework == "tf":
        UpperCAmelCase__ = inspect.signature(model_class.call )  # TensorFlow models
    elif framework == "pt":
        UpperCAmelCase__ = inspect.signature(model_class.forward )  # PyTorch models
    else:
        UpperCAmelCase__ = inspect.signature(model_class.__call__ )  # Flax models
    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False

def a_ ( lowerCamelCase ):
    """Return the label-argument names accepted by the model class's forward
    signature (QA models also accept start/end positions).

    NOTE(review): same mangling caveat as above (`model_name`, `signature`).
    """
    UpperCAmelCase__ = model_class.__name__
    UpperCAmelCase__ = infer_framework(lowerCamelCase )
    if framework == "tf":
        UpperCAmelCase__ = inspect.signature(model_class.call )  # TensorFlow models
    elif framework == "pt":
        UpperCAmelCase__ = inspect.signature(model_class.forward )  # PyTorch models
    else:
        UpperCAmelCase__ = inspect.signature(model_class.__call__ )  # Flax models
    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def a_ ( lowerCamelCase , parent_key = "" , delimiter = "." ):
    """Flatten a nested mapping into a single-level dict, joining nested keys
    with ``delimiter`` (e.g. ``{"a": {"b": 1}}`` -> ``{"a.b": 1}``).

    BUGFIX: the signature previously declared the same parameter name three
    times (a SyntaxError), and the inner generator recursed via an undefined
    outer name instead of itself.
    """

    def _flatten_dict(d , parent_key="" , delimiter="." ):
        for k, v in d.items():
            key = str(parent_key ) + delimiter + str(k ) if parent_key else k
            if v and isinstance(v , MutableMapping ):
                # Recurse through the inner generator itself.
                yield from _flatten_dict(v , key , delimiter )
            else:
                yield key, v

    return dict(_flatten_dict(lowerCamelCase , parent_key , delimiter ) )
@contextmanager
def a_ ( lowerCamelCase , use_temp_dir = False ):
    """Yield either the given working directory unchanged, or a fresh temporary
    directory that is deleted (with its contents) on exit.

    BUGFIX: the signature previously declared the same parameter name twice
    (a SyntaxError) while the body read `use_temp_dir` and an undefined
    `working_dir`.
    """
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield lowerCamelCase
# NOTE(review): each helper below was renamed to `a_`, so they shadow one
# another, and the framework predicates they call (`is_numpy_array`, ...) are
# unresolved in this file — that mangling is outside these bodies' control.
# BUGFIX here: every signature previously declared duplicate parameter names
# (a SyntaxError) while the bodies read `array`/`axes`/`axis`, and those names
# are now restored.
def a_ ( array , axes=None ):
    """Framework-agnostic transpose for numpy/torch/TF/JAX arrays."""
    if is_numpy_array(array ):
        return np.transpose(array , axes=axes )
    elif is_torch_tensor(array ):
        return array.T if axes is None else array.permute(*axes )
    elif is_tf_tensor(array ):
        import tensorflow as tf

        return tf.transpose(array , perm=axes )
    elif is_jax_tensor(array ):
        return jnp.transpose(array , axes=axes )
    else:
        raise ValueError(f'''Type not supported for transpose: {type(array )}.''' )

def a_ ( array , newshape ):
    """Framework-agnostic reshape."""
    if is_numpy_array(array ):
        return np.reshape(array , newshape )
    elif is_torch_tensor(array ):
        return array.reshape(*newshape )
    elif is_tf_tensor(array ):
        import tensorflow as tf

        return tf.reshape(array , newshape )
    elif is_jax_tensor(array ):
        return jnp.reshape(array , newshape )
    else:
        raise ValueError(f'''Type not supported for reshape: {type(array )}.''' )

def a_ ( array , axis=None ):
    """Framework-agnostic squeeze (drop size-1 axes)."""
    if is_numpy_array(array ):
        return np.squeeze(array , axis=axis )
    elif is_torch_tensor(array ):
        return array.squeeze() if axis is None else array.squeeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf

        return tf.squeeze(array , axis=axis )
    elif is_jax_tensor(array ):
        return jnp.squeeze(array , axis=axis )
    else:
        raise ValueError(f'''Type not supported for squeeze: {type(array )}.''' )

def a_ ( array , axis ):
    """Framework-agnostic expand_dims (insert a size-1 axis)."""
    if is_numpy_array(array ):
        return np.expand_dims(array , axis )
    elif is_torch_tensor(array ):
        return array.unsqueeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf

        return tf.expand_dims(array , axis=axis )
    elif is_jax_tensor(array ):
        return jnp.expand_dims(array , axis=axis )
    else:
        raise ValueError(f'''Type not supported for expand_dims: {type(array )}.''' )

def a_ ( array ):
    """Framework-agnostic element count."""
    if is_numpy_array(array ):
        return np.size(array )
    elif is_torch_tensor(array ):
        return array.numel()
    elif is_tf_tensor(array ):
        import tensorflow as tf

        return tf.size(array )
    elif is_jax_tensor(array ):
        return array.size
    else:
        # BUGFIX: the error message previously said "expand_dims" (copy-paste).
        raise ValueError(f'''Type not supported for tensor_size: {type(array )}.''' )
def a_ ( auto_map , repo_id ):
    """Prefix every ``auto_map`` entry with ``repo_id`` ("repo--Class") so the
    mapping is resolvable from the hub; entries that are ``None`` or already
    contain ``--`` are left untouched.

    BUGFIX: the signature previously declared the same parameter name twice
    (a SyntaxError) and the rewritten values were bound to a throwaway local
    instead of being written back into the mapping.
    """
    for key, value in auto_map.items():
        if isinstance(value , (tuple, list) ):
            auto_map[key] = [f'''{repo_id}--{v}''' if (v is not None and '--' not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f'''{repo_id}--{value}'''
    return auto_map
def a_ ( model_class ):
    """Infer whether a model class belongs to PyTorch ('pt'), TensorFlow ('tf')
    or Flax ('flax') by walking its MRO and inspecting base-class modules/names.

    BUGFIX: the loop body read `module` and `name`, and the error message read
    `model_class`, but those names were never bound (assignments went to a
    mangled throwaway local and the parameter had a different name).

    Raises:
        TypeError: if no base class identifies a framework.
    """
    for base_class in inspect.getmro(model_class ):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith('tensorflow' ) or module.startswith('keras' ) or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith('torch' ) or name == "PreTrainedModel":
            return "pt"
        elif module.startswith('flax' ) or module.startswith('jax' ) or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f'''Could not infer framework from class {model_class}.''' )
| 98 | """simple docstring"""
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Submodule -> public names mapping consumed by _LazyModule below.
# BUGFIX: this dict and the torch-only extension were previously bound to
# throwaway mangled variables, so `_import_structure` was undefined at the
# _LazyModule call and the modeling entries were silently discarded.
_import_structure = {
    'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
    'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
    'processing_mctct': ['MCTCTProcessor'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_mctct'] = [
        'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MCTCTForCTC',
        'MCTCTModel',
        'MCTCTPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowerCAmelCase__ : str = {
'configuration_gpt_bigcode': ['GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTBigCodeConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Optional[Any] = [
'GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTBigCodeForSequenceClassification',
'GPTBigCodeForTokenClassification',
'GPTBigCodeForCausalLM',
'GPTBigCodeModel',
'GPTBigCodePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
lowerCAmelCase__ : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98 | """simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
    from PIL import Image
else:
    class snake_case :
        """Stand-in so module import succeeds when vision deps are missing.

        NOTE(review): the original presumably named this ``Image`` with an
        ``open`` staticmethod — the names here look machine-mangled.
        """

        @staticmethod
        def __lowerCAmelCase ( *lowerCamelCase__ : List[Any] ,**lowerCamelCase__ : Union[str, Any] ):
            # No-op placeholder; the real implementation lives in PIL.
            pass
def a_ ( image ):
    """Return the first 10 hex chars of the MD5 digest of the image's raw bytes.

    Used to compare segmentation masks compactly in the tests below.

    BUGFIX: ``hashlib.mda`` does not exist — the intended digest is ``md5`` —
    and the body read ``image`` while the parameter had a different name.
    """
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()[:1_0]
def a_ ( lowerCamelCase ):
    """Summarize a mask image as {'hash': <10-char digest>, 'shape': ...} for
    compact test comparison.

    NOTE(review): `hashimage`, `npimg` and `shape` are unresolved here — the
    hash helper just above was renamed `a_` and the locals were mangled;
    confirm against the original test module.
    """
    UpperCAmelCase__ = np.array(lowerCamelCase )
    UpperCAmelCase__ = npimg.shape
    return {"hash": hashimage(lowerCamelCase ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class snake_case ( unittest.TestCase ):
"""simple docstring"""
snake_case__ = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
snake_case__ = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def __lowerCAmelCase ( self : List[Any] ,lowerCamelCase__ : Dict ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : str ):
    """Build a MaskGenerationPipeline plus example images for the shared pipeline tests.

    NOTE(review): the return reads `image_segmenter`, but the pipeline is bound
    to `UpperCAmelCase__` — machine-mangled names; confirm against the original.
    """
    UpperCAmelCase__ = MaskGenerationPipeline(model=lowerCamelCase__ ,image_processor=lowerCamelCase__ )
    return image_segmenter, [
        "./tests/fixtures/tests_samples/COCO/000000039769.png",
        "./tests/fixtures/tests_samples/COCO/000000039769.png",
    ]

def __lowerCAmelCase ( self : Union[str, Any] ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : str ):
    """Generic pipeline-run hook: intentionally empty for mask generation."""
    pass

@require_tf
@unittest.skip('Image segmentation not implemented in TF' )
def __lowerCAmelCase ( self : Optional[Any] ):
    """TF variant skipped: the pipeline has no TensorFlow implementation."""
    pass
@slow
@require_torch
def __lowerCAmelCase ( self : List[str] ):
UpperCAmelCase__ = pipeline('mask-generation' ,model='facebook/sam-vit-huge' )
UpperCAmelCase__ = image_segmenter('http://images.cocodataset.org/val2017/000000039769.jpg' ,points_per_batch=256 )
# Shortening by hashing
UpperCAmelCase__ = []
for i, o in enumerate(outputs['masks'] ):
new_outupt += [{"mask": mask_to_test_readable(lowerCamelCase__ ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(lowerCamelCase__ ,decimals=4 ) ,[
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_4_4_4},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_2_1},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_1_6_7},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_1_3_2},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_0_5_3},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.9_9_6_7},
{'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.9_9_3},
{'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.9_9_0_9},
{'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.9_8_7_9},
{'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.9_8_3_4},
{'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.9_7_1_6},
{'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.9_6_1_2},
{'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.9_5_9_9},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.9_5_5_2},
{'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.9_5_3_2},
{'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.9_5_1_6},
{'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.9_4_9_9},
{'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.9_4_8_3},
{'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.9_4_6_4},
{'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.9_4_3},
{'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.9_4_3},
{'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.9_4_0_8},
{'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.9_3_3_5},
{'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.9_3_2_6},
{'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.9_2_6_2},
{'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.8_9_9_9},
{'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.8_9_8_6},
{'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.8_9_8_4},
{'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.8_8_7_3},
{'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.8_8_7_1}
] ,)
# fmt: on
@require_torch
@slow
def __lowerCAmelCase ( self : Optional[Any] ):
UpperCAmelCase__ = 'facebook/sam-vit-huge'
UpperCAmelCase__ = pipeline('mask-generation' ,model=lowerCamelCase__ )
UpperCAmelCase__ = image_segmenter(
'http://images.cocodataset.org/val2017/000000039769.jpg' ,pred_iou_thresh=1 ,points_per_batch=256 )
# Shortening by hashing
UpperCAmelCase__ = []
for i, o in enumerate(outputs['masks'] ):
new_outupt += [{"mask": mask_to_test_readable(lowerCamelCase__ ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(lowerCamelCase__ ,decimals=4 ) ,[
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_4_4_4},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_2_1_0},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_1_6_7},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_1_3_2},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_0_5_3},
] ,)
| 98 | 1 |
"""simple docstring"""
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
# Configure root logging once at import time so progress messages are visible.
logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
# Module-level logger shared by the binarisation routine below.
lowerCAmelCase__ : Dict = logging.getLogger(__name__)
def a_ ( ):
    """Binarise a raw text dump for distillation pre-training.

    Reads ``--file_path`` line by line, wraps each line in the chosen
    tokenizer's sentence-boundary specials, encodes it to ids, and pickles
    the list of numpy id arrays to ``<dump_file>.<tokenizer_name>.pickle``.
    """
    # Fix: virtually every local was assigned to a throwaway name and then
    # read under its intended name (NameError at runtime); also
    # ``np.uintaa``/``np.intaa`` are typos for ``np.uint16``/``np.int32``.
    parser = argparse.ArgumentParser(
        description='Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).' )
    parser.add_argument('--file_path' , type=str , default='data/dump.txt' , help='The path to the data.' )
    parser.add_argument('--tokenizer_type' , type=str , default='bert' , choices=['bert', 'roberta', 'gpt2'] )
    parser.add_argument('--tokenizer_name' , type=str , default='bert-base-uncased' , help='The tokenizer to use.' )
    parser.add_argument('--dump_file' , type=str , default='data/dump' , help='The dump file prefix.' )
    args = parser.parse_args()
    logger.info(f'''Loading Tokenizer ({args.tokenizer_name})''' )
    # Select the tokenizer class and its sentence-boundary special tokens.
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map['cls_token'] # `[CLS]`
        sep = tokenizer.special_tokens_map['sep_token'] # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map['cls_token'] # `<s>`
        sep = tokenizer.special_tokens_map['sep_token'] # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPTaTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map['bos_token'] # `<|endoftext|>`
        sep = tokenizer.special_tokens_map['eos_token'] # `<|endoftext|>`
    logger.info(f'''Loading text from {args.file_path}''' )
    with open(args.file_path , 'r' , encoding='utf8' ) as fp:
        data = fp.readlines()
    logger.info('Start encoding' )
    logger.info(f'''{len(data )} examples to process.''' )
    rslt = []
    iter_count = 0  # renamed from ``iter`` to avoid shadowing the builtin
    interval = 1_0_0_0_0
    start = time.time()
    for text in data:
        # Wrap every line in the tokenizer's own BOS/EOS markers, then encode
        # without letting the tokenizer add specials a second time.
        line = f'''{bos} {text.strip()} {sep}'''
        token_ids = tokenizer.encode(line , add_special_tokens=False )
        rslt.append(token_ids )
        iter_count += 1
        if iter_count % interval == 0:
            end = time.time()
            logger.info(f'''{iter_count} examples processed. - {(end-start):.2f}s/{interval}expl''' )
            start = time.time()
    logger.info('Finished binarization' )
    logger.info(f'''{len(data )} examples processed.''' )
    dp_file = f'''{args.dump_file}.{args.tokenizer_name}.pickle'''
    vocab_size = tokenizer.vocab_size
    # Store ids in the smallest integer dtype that fits the vocabulary.
    if vocab_size < (1 << 1_6):
        rslt_ = [np.uint16(d ) for d in rslt]
    else:
        rslt_ = [np.int32(d ) for d in rslt]
    random.shuffle(rslt_ )
    logger.info(f'''Dump to {dp_file}''' )
    with open(dp_file , 'wb' ) as handle:
        pickle.dump(rslt_ , handle , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
    # NOTE(review): ``main`` is not defined in this module — the entry point
    # above is named ``a_``. Confirm the intended callable before running.
    main()
| 98 | """simple docstring"""
import functools
def a_ ( lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = len(lowerCamelCase )
UpperCAmelCase__ = len(lowerCamelCase )
@functools.cache
def min_distance(lowerCamelCase , lowerCamelCase ) -> int:
# if first word index is overflow - delete all from the second word
if indexa >= len_worda:
return len_worda - indexa
# if second word index is overflow - delete all from the first word
if indexa >= len_worda:
return len_worda - indexa
UpperCAmelCase__ = int(worda[indexa] != worda[indexa] ) # current letters not identical
return min(
1 + min_distance(indexa + 1 , lowerCamelCase ) , 1 + min_distance(lowerCamelCase , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , )
return min_distance(0 , 0 )
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 98 | 1 |
"""simple docstring"""
def a_ ( a , b ):
    """Return gcd(a, b) by Euclid's recursion; the result is non-negative."""
    # Fix: the original declared the same parameter name twice (SyntaxError)
    # and recursed through an undefined name; recurse through this function.
    return abs(b ) if a == 0 else a_(b % a , a )
def a_ ( x , y ):
    """Return gcd(x, y) by the iterative Euclidean algorithm (non-negative)."""
    # Fix: the original declared the same parameter name twice (SyntaxError)
    # and the swapped pair was never bound to the names used in the loop.
    while y: # --> when y=0 then loop will terminate and return x as final GCD.
        x , y = y, x % y
    return abs(x )
def a_ ( ):
    """Prompt for two comma-separated integers and print their GCD computed
    by both the recursive and iterative helpers.

    Malformed input is caught and reported as ``Wrong input``.
    """
    # Fix: the parsed values were never bound to the names (``nums``,
    # ``num_a``) that the lines below read, and the second operand was
    # missing from the printed messages.
    try:
        nums = input('Enter two integers separated by comma (,): ' ).split(',' )
        num_a = int(nums[0] )
        num_b = int(nums[1] )
        # NOTE(review): ``greatest_common_divisor`` / ``gcd_by_iterative`` are
        # the intended names of the two helpers above — confirm the names
        # actually bound at module level before running this entry point.
        print(
            f'''greatest_common_divisor({num_a}, {num_b}) = '''
            f'''{greatest_common_divisor(num_a , num_b )}''' )
        print(f'''By iterative gcd({num_a}, {num_b}) = {gcd_by_iterative(num_a , num_b )}''' )
    except (IndexError, UnboundLocalError, ValueError):
        print('Wrong input' )
if __name__ == "__main__":
    # NOTE(review): ``main`` is not defined in this module — the interactive
    # entry point above is named ``a_``. Confirm before running.
    main()
| 98 | """simple docstring"""
from PIL import Image
def a_ ( img , level ):
    """Return a copy of ``img`` with its contrast adjusted by ``level``.

    Uses the standard contrast-correction factor; ``level`` of 0 leaves
    pixel values unchanged. ``img`` must expose a PIL-style ``point`` method.
    """
    # Fix: the original declared the same parameter name twice (SyntaxError)
    # and never bound ``factor`` used inside ``contrast``.
    factor = (2_5_9 * (level + 2_5_5)) / (2_5_5 * (2_5_9 - level))
    def contrast(c ) -> int:
        # Map one channel value; 128 is the midpoint left fixed by the curve.
        return int(1_2_8 + factor * (c - 1_2_8) )
    return img.point(contrast )
if __name__ == "__main__":
    # Load image
    with Image.open('image_data/lena.jpg') as img:
        # Change contrast to 170
        # NOTE(review): ``change_contrast`` / ``cont_img`` are not bound under
        # those names in this module (the helper above is ``a_`` and the
        # result is assigned to a different name) — verify before running.
        lowerCAmelCase__ : Any = change_contrast(img, 170)
        cont_img.save('image_data/lena_high_contrast.png', format='png')
| 98 | 1 |
"""simple docstring"""
def a_ ( lowerCamelCase ):
    """Knuth–Morris–Pratt prefix (failure) function of ``lowerCamelCase``.

    ``result[i]`` is the length of the longest proper prefix of
    ``lowerCamelCase[: i + 1]`` that is also a suffix of it.
    """
    # Fix: the result list and the running border length were never bound to
    # the names (``prefix_result``, ``j``) read below.
    prefix_result = [0] * len(lowerCamelCase )
    for i in range(1 , len(lowerCamelCase ) ):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and lowerCamelCase[i] != lowerCamelCase[j]:
            j = prefix_result[j - 1]
        if lowerCamelCase[i] == lowerCamelCase[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result
def a_ ( lowerCamelCase ):
    """Length of the longest prefix of ``lowerCamelCase`` that is also its
    suffix, i.e. the maximum of the KMP prefix function."""
    failure_values = prefix_function(lowerCamelCase )
    return max(failure_values )
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 98 | """simple docstring"""
import os
import sys
import unittest
lowerCAmelCase__ : Tuple = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
lowerCAmelCase__ : Tuple = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py')
lowerCAmelCase__ : Optional[Any] = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py')
class snake_case ( unittest.TestCase ):
    """Unit tests for the ``get_test_info`` mapping utilities, checked against
    the BERT and BLIP modeling test files."""
    def __lowerCAmelCase ( self : List[str] ):
        # test-class -> tester-class mapping.
        # NOTE(review): the mapping results and expected dicts are never bound
        # to the names used in the assertions (``lowerCamelCase__`` is
        # undefined in this scope) — the bindings look garbled; verify against
        # the module-level BERT/BLIP test-file constants.
        UpperCAmelCase__ = get_test_to_tester_mapping(lowerCamelCase__ )
        UpperCAmelCase__ = get_test_to_tester_mapping(lowerCamelCase__ )
        UpperCAmelCase__ = {'BertModelTest': 'BertModelTester'}
        UpperCAmelCase__ = {
            'BlipModelTest': 'BlipModelTester',
            'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
            'BlipTextModelTest': 'BlipTextModelTester',
            'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
            'BlipVQAModelTest': 'BlipVQAModelTester',
            'BlipVisionModelTest': 'BlipVisionModelTester',
        }
        self.assertEqual(get_test_info.to_json(lowerCamelCase__ ) ,lowerCamelCase__ )
        self.assertEqual(get_test_info.to_json(lowerCamelCase__ ) ,lowerCamelCase__ )
    def __lowerCAmelCase ( self : Optional[Any] ):
        # model-class -> test-classes mapping.
        # NOTE(review): same garbled bindings as above; verify names.
        UpperCAmelCase__ = get_model_to_test_mapping(lowerCamelCase__ )
        UpperCAmelCase__ = get_model_to_test_mapping(lowerCamelCase__ )
        UpperCAmelCase__ = {
            'BertForMaskedLM': ['BertModelTest'],
            'BertForMultipleChoice': ['BertModelTest'],
            'BertForNextSentencePrediction': ['BertModelTest'],
            'BertForPreTraining': ['BertModelTest'],
            'BertForQuestionAnswering': ['BertModelTest'],
            'BertForSequenceClassification': ['BertModelTest'],
            'BertForTokenClassification': ['BertModelTest'],
            'BertLMHeadModel': ['BertModelTest'],
            'BertModel': ['BertModelTest'],
        }
        UpperCAmelCase__ = {
            'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
            'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
            'BlipForQuestionAnswering': ['BlipVQAModelTest'],
            'BlipModel': ['BlipModelTest'],
            'BlipTextModel': ['BlipTextModelTest'],
            'BlipVisionModel': ['BlipVisionModelTest'],
        }
        self.assertEqual(get_test_info.to_json(lowerCamelCase__ ) ,lowerCamelCase__ )
        self.assertEqual(get_test_info.to_json(lowerCamelCase__ ) ,lowerCamelCase__ )
    def __lowerCAmelCase ( self : int ):
        # model-class -> tester-classes mapping.
        # NOTE(review): same garbled bindings as above; verify names.
        UpperCAmelCase__ = get_model_to_tester_mapping(lowerCamelCase__ )
        UpperCAmelCase__ = get_model_to_tester_mapping(lowerCamelCase__ )
        UpperCAmelCase__ = {
            'BertForMaskedLM': ['BertModelTester'],
            'BertForMultipleChoice': ['BertModelTester'],
            'BertForNextSentencePrediction': ['BertModelTester'],
            'BertForPreTraining': ['BertModelTester'],
            'BertForQuestionAnswering': ['BertModelTester'],
            'BertForSequenceClassification': ['BertModelTester'],
            'BertForTokenClassification': ['BertModelTester'],
            'BertLMHeadModel': ['BertModelTester'],
            'BertModel': ['BertModelTester'],
        }
        UpperCAmelCase__ = {
            'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
            'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
            'BlipForQuestionAnswering': ['BlipVQAModelTester'],
            'BlipModel': ['BlipModelTester'],
            'BlipTextModel': ['BlipTextModelTester'],
            'BlipVisionModel': ['BlipVisionModelTester'],
        }
        self.assertEqual(get_test_info.to_json(lowerCamelCase__ ) ,lowerCamelCase__ )
        self.assertEqual(get_test_info.to_json(lowerCamelCase__ ) ,lowerCamelCase__ )
| 98 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Lazy-import structure: submodule name -> public symbols it exposes.
# Fix: this dict (and its torch-only addition) must be bound to
# ``_import_structure`` — the name consumed by ``_LazyModule`` at the bottom
# of the file — otherwise importing this package raises NameError.
_import_structure = {'configuration_mmbt': ['MMBTConfig']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_mmbt'] = ['MMBTForClassification', 'MMBTModel', 'ModalEmbeddings']

if TYPE_CHECKING:
    from .configuration_mmbt import MMBTConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98 | """simple docstring"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

# Lazy-import structure: submodule name -> public symbols it exposes.
# Fix: must be bound to ``_import_structure`` — the name consumed by
# ``_LazyModule`` at the bottom — otherwise importing the package fails.
_import_structure = {
    'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
    'tokenization_roc_bert': ['RoCBertTokenizer'],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # No fast tokenizer exists for RoCBert yet; nothing extra to register.
    pass

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_roc_bert'] = [
        'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'RoCBertForCausalLM',
        'RoCBertForMaskedLM',
        'RoCBertForMultipleChoice',
        'RoCBertForPreTraining',
        'RoCBertForQuestionAnswering',
        'RoCBertForSequenceClassification',
        'RoCBertForTokenClassification',
        'RoCBertLayer',
        'RoCBertModel',
        'RoCBertPreTrainedModel',
        'load_tf_weights_in_roc_bert',
    ]

if TYPE_CHECKING:
    from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
    from .tokenization_roc_bert import RoCBertTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        # Fix: this branch raised OptionalDependencyNotAvailable when the
        # dependency WAS available, breaking static type-checking passes; it
        # must be a no-op until a fast tokenizer is added (mirrors the
        # runtime branch above).
        pass
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roc_bert import (
            ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoCBertForCausalLM,
            RoCBertForMaskedLM,
            RoCBertForMultipleChoice,
            RoCBertForPreTraining,
            RoCBertForQuestionAnswering,
            RoCBertForSequenceClassification,
            RoCBertForTokenClassification,
            RoCBertLayer,
            RoCBertModel,
            RoCBertPreTrainedModel,
            load_tf_weights_in_roc_bert,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98 | 1 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCAmelCase__ : Tuple = logging.get_logger(__name__)
lowerCAmelCase__ : Any = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase__ : Dict = {
'tokenizer_file': {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json',
},
}
lowerCAmelCase__ : Any = {
'gpt-neox-20b': 2_048,
}
class snake_case ( __UpperCAmelCase ):
    """Fast (tokenizers-backed) tokenizer class exposing ``input_ids`` and
    ``attention_mask``, with GPT-style ``<|endoftext|>`` specials."""
    # Class-level tokenizer resources and model input names.
    snake_case__ = VOCAB_FILES_NAMES
    snake_case__ = PRETRAINED_VOCAB_FILES_MAP
    snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    snake_case__ = ["input_ids", "attention_mask"]
    def __init__( self : Dict ,lowerCamelCase__ : Union[str, Any]=None ,lowerCamelCase__ : Tuple=None ,lowerCamelCase__ : str=None ,lowerCamelCase__ : int="<|endoftext|>" ,lowerCamelCase__ : Dict="<|endoftext|>" ,lowerCamelCase__ : int="<|endoftext|>" ,lowerCamelCase__ : Any=False ,**lowerCamelCase__ : List[str] ,):
        super().__init__(
            lowerCamelCase__ ,lowerCamelCase__ ,tokenizer_file=lowerCamelCase__ ,unk_token=lowerCamelCase__ ,bos_token=lowerCamelCase__ ,eos_token=lowerCamelCase__ ,add_prefix_space=lowerCamelCase__ ,**lowerCamelCase__ ,)
        # Re-create the backend pre-tokenizer when the stored add_prefix_space
        # flag disagrees with the requested one.
        # NOTE(review): the parsed state / class are never bound to the names
        # (``pre_tok_state``, ``pre_tok_class``) read below — the assignments
        # look garbled; verify.
        UpperCAmelCase__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('add_prefix_space' ,lowerCamelCase__ ) != add_prefix_space:
            UpperCAmelCase__ = getattr(lowerCamelCase__ ,pre_tok_state.pop('type' ) )
            UpperCAmelCase__ = add_prefix_space
            UpperCAmelCase__ = pre_tok_class(**lowerCamelCase__ )
        UpperCAmelCase__ = add_prefix_space
    def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : str ,lowerCamelCase__ : Optional[str] = None ):
        # Persist the underlying tokenizer model files; returns the file paths.
        UpperCAmelCase__ = self._tokenizer.model.save(lowerCamelCase__ ,name=lowerCamelCase__ )
        return tuple(lowerCamelCase__ )
    def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : "Conversation" ):
        # Flatten a Conversation into ids, appending EOS after each turn and
        # truncating to the model's maximum length from the left.
        # NOTE(review): ``input_ids`` is never bound by the line above — the
        # assignment looks garbled; verify.
        UpperCAmelCase__ = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ) + [self.eos_token_id] )
        if len(lowerCamelCase__ ) > self.model_max_length:
            UpperCAmelCase__ = input_ids[-self.model_max_length :]
        return input_ids
| 98 | """simple docstring"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)

# Lazy-import structure: submodule name -> public symbols it exposes.
# Fix: these mappings were bound to throwaway names while ``_LazyModule``
# below reads ``_import_structure``; restore the intended bindings.
_import_structure = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_vit_mae'] = [
        'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ViTMAEForPreTraining',
        'ViTMAELayer',
        'ViTMAEModel',
        'ViTMAEPreTrainedModel',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_vit_mae'] = [
        'TFViTMAEForPreTraining',
        'TFViTMAEModel',
        'TFViTMAEPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_mae import (
            VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMAEForPreTraining,
            ViTMAELayer,
            ViTMAEModel,
            ViTMAEPreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98 | 1 |
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
    'files' , [
        ['full:README.md', 'dataset_infos.json'],
        ['empty:README.md', 'dataset_infos.json'],
        ['dataset_infos.json'],
        ['full:README.md'],
    ] , )
def a_ ( files , tmp_path_factory ):
    """``DatasetInfosDict.from_directory`` must read ``dataset_size`` from the
    README.md YAML header and/or the legacy ``dataset_infos.json``, whichever
    of the parametrised ``files`` are present.
    """
    # Fix: the original declared the same parameter name twice (SyntaxError)
    # and never bound the temp dir / parsed infos to the names read below.
    dataset_infos_dir = tmp_path_factory.mktemp('dset_infos_dir' )
    if "full:README.md" in files:
        with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
            f.write('---\ndataset_info:\n dataset_size: 42\n---' )
    if "empty:README.md" in files:
        with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
            f.write('' )
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f:
            f.write('{"default": {"dataset_size": 42}}' )
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir )
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 4_2
@pytest.mark.parametrize(
    'dataset_info' , [
        DatasetInfo(),
        DatasetInfo(
            description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=4_2 , ),
    ] , )
def a_ ( dataset_info , tmp_path ):
    """Round-trip a ``DatasetInfo`` through ``write_to_directory`` /
    ``from_directory`` and check equality plus the JSON file on disk.
    """
    # Fix: the original declared the same parameter name twice (SyntaxError)
    # and never bound the path / reloaded info to the names read below.
    tmp_path = str(tmp_path )
    dataset_info.write_to_directory(tmp_path )
    reloaded = DatasetInfo.from_directory(tmp_path )
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path , 'dataset_info.json' ) )
def a_ ( ):
    """``DatasetInfo._to_yaml_dict`` must expose exactly the whitelisted YAML
    keys with YAML-serialisable values, and survive a dump/load round-trip.
    """
    # Fix: the info object and its yaml dict were never bound to the names
    # the assertions below read.
    dataset_info = DatasetInfo(
        description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train', 'num_examples': 4_2}] , download_checksums={} , download_size=1_3_3_7 , post_processing_size=4_4_2 , dataset_size=1_2_3_4 , size_in_bytes=1_3_3_7 + 4_4_2 + 1_2_3_4 , )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
    dumped = yaml.safe_dump(dataset_info_yaml_dict )
    reloaded = yaml.safe_load(dumped )
    assert dataset_info_yaml_dict == reloaded
def a_ ( ):
    """A default-constructed ``DatasetInfo`` serialises to an empty YAML dict."""
    # Fix: the info object / its yaml dict were never bound to the names used.
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
    'dataset_infos_dict' , [
        DatasetInfosDict(),
        DatasetInfosDict({'default': DatasetInfo()} ),
        DatasetInfosDict({'my_config_name': DatasetInfo()} ),
        DatasetInfosDict(
            {
                'default': DatasetInfo(
                    description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=4_2 , )
            } ),
        DatasetInfosDict(
            {
                'v1': DatasetInfo(dataset_size=4_2 ),
                'v2': DatasetInfo(dataset_size=1_3_3_7 ),
            } ),
    ] , )
def a_ ( dataset_infos_dict , tmp_path ):
    """Round-trip a ``DatasetInfosDict`` through ``write_to_directory`` /
    ``from_directory``, normalising each entry to what YAML can preserve.
    """
    # Fix: the original declared the same parameter name twice (SyntaxError)
    # and never bound the path / reloaded dict / per-config updates to the
    # names read below.
    tmp_path = str(tmp_path )
    dataset_infos_dict.write_to_directory(tmp_path )
    reloaded = DatasetInfosDict.from_directory(tmp_path )
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path , 'README.md' ) )
| 98 | """simple docstring"""
import os
import numpy
import onnx
def a_ ( a , b ):
    """Return True when two TensorProtos are equal ignoring their names.

    Temporarily blanks both ``name`` fields, compares, then restores them so
    the inputs are left unmodified.
    """
    # Fix: the original declared the same parameter name twice (SyntaxError)
    # and never actually cleared/restored the name fields — every value was
    # assigned to a throwaway local instead.
    name_a = a.name
    name_b = b.name
    a.name = ''
    b.name = ''
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def a_ ( node_proto , name , new_name ):
    """Replace every input called ``name`` on ``node_proto`` with
    ``new_name``, recursing into If/Loop subgraphs.

    insert+pop is used because protobuf repeated fields do not support item
    assignment.
    """
    # Fix: the original declared the same parameter name three times
    # (SyntaxError) and passed the wrong values to insert().
    for i, input_name in enumerate(node_proto.input ):
        if input_name == name:
            node_proto.input.insert(i , new_name )
            node_proto.input.pop(i + 1 )
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
        _graph_replace_input_with(node_proto.attribute[1].g , name , new_name )
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
def a_ ( graph_proto , name , new_name ):
    """Apply the node-level input replacement to every node of ``graph_proto``."""
    # Fix: the original declared the same parameter name three times
    # (a SyntaxError).
    for n in graph_proto.node:
        _node_replace_input_with(n , name , new_name )
def a_ ( model , model_without_ext , ind_to_replace ):
    """Drop duplicated initializers from ``model_without_ext`` and rewire
    all references to the surviving copy.

    ``ind_to_replace`` holds ``(i, ref_i)`` pairs with ``i > ref_i``: the
    tensor at ``i`` is removed and every use is redirected to ``ref_i``.
    """
    # Fix: the original declared the same parameter name three times
    # (SyntaxError) and never bound the names read below.
    inits_with_data = list(model.graph.initializer )
    inits = list(model_without_ext.graph.initializer )
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i] )
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph , name_i , name_ref )
def a_ ( onnx_file_path ):
    """Load an ONNX model, detect duplicated initializer tensors, rewire all
    references to a single surviving copy, and save the result next to the
    input as ``optimized_<name>``.

    Returns the path of the optimized model file.
    """
    # Fix: virtually every local in the original was assigned to a throwaway
    # name and then read under its intended name; restore the bindings.
    model_file_folder = os.path.dirname(onnx_file_path )
    model_file_name = os.path.basename(onnx_file_path )
    model = onnx.load(os.path.join(model_file_folder , model_file_name ) )
    inits = list(model.graph.initializer )
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    # Pairwise scan for identical tensors (names ignored); O(n^2) over the
    # initializer list but each tensor is considered at most once.
    for i in range(len(inits ) ):
        if i in dup_set:
            continue
        for j in range(i + 1 , len(inits ) ):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i] , inits[j] ):
                dup_set.add(i )
                dup_set.add(j )
                # Estimate bytes saved from the duplicate's dtype and dims.
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims )
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 1_1:
                    mem_size *= 8
                else:
                    print('unexpected data type: ' , dtype )
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j )
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i) )
    print('total reduced size: ' , total_reduced_size / 1_0_2_4 / 1_0_2_4 / 1_0_2_4 , 'GB' )
    ind_to_replace = sorted(ind_to_replace )
    _remove_dup_initializers_from_model(model , model , ind_to_replace )
    optimized_model_file_name = 'optimized_' + model_file_name
    new_model_path = os.path.join(model_file_folder , optimized_model_file_name )
    onnx.save(model , new_model_path )
    return new_model_path
| 98 | 1 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
lowerCAmelCase__ : List[str] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
@dataclass
class snake_case :
    """Arguments describing which dataset/image folders to train and
    evaluate on for MAE pre-training."""
    # NOTE(review): several fields below use ``default=__UpperCAmelCase``;
    # inside a class body that double-underscore name is mangled and is not
    # defined anywhere — presumably these defaults were ``None``; verify.
    snake_case__ = field(
        default="cifar10" , metadata={"help": "Name of a dataset from the datasets package"} )
    snake_case__ = field(
        default=__UpperCAmelCase , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
    snake_case__ = field(
        default=__UpperCAmelCase , metadata={"help": "The column name of the images in the files."} )
    snake_case__ = field(default=__UpperCAmelCase , metadata={"help": "A folder containing the training data."} )
    snake_case__ = field(default=__UpperCAmelCase , metadata={"help": "A folder containing the validation data."} )
    snake_case__ = field(
        default=0.15 , metadata={"help": "Percent to split off of train for validation."} )
    snake_case__ = field(
        default=__UpperCAmelCase , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        } , )
    snake_case__ = field(
        default=__UpperCAmelCase , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        } , )
    def __lowerCAmelCase ( self : Any ):
        # Collect the provided folders into a data_files mapping (or None).
        # NOTE(review): the dict and its entries are never bound to the
        # ``data_files`` name read in the last line, and nothing assigns the
        # result onto ``self`` — the bindings look garbled; verify against
        # the original ``__post_init__``.
        UpperCAmelCase__ = {}
        if self.train_dir is not None:
            UpperCAmelCase__ = self.train_dir
        if self.validation_dir is not None:
            UpperCAmelCase__ = self.validation_dir
        UpperCAmelCase__ = data_files if data_files else None
@dataclass
class snake_case :
    """Arguments describing which ViT-MAE model/config/processor to
    initialise, plus MAE-specific masking options."""
    # NOTE(review): ``default=__UpperCAmelCase`` is a mangled, undefined name
    # — presumably these defaults were ``None``; verify.
    snake_case__ = field(
        default=__UpperCAmelCase , metadata={
            "help": (
                "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
            )
        } , )
    snake_case__ = field(
        default=__UpperCAmelCase , metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"} )
    snake_case__ = field(
        default=__UpperCAmelCase , metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        } , )
    snake_case__ = field(
        default=__UpperCAmelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} )
    snake_case__ = field(
        default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
    snake_case__ = field(default=__UpperCAmelCase , metadata={"help": "Name or path of preprocessor config."} )
    snake_case__ = field(
        default=__UpperCAmelCase , metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        } , )
    # Fraction of patches masked during MAE pre-training.
    snake_case__ = field(
        default=0.75 , metadata={"help": "The ratio of the number of masked tokens in the input sequence."} )
    snake_case__ = field(
        default=__UpperCAmelCase , metadata={"help": "Whether or not to train with normalized pixel values as target."} )
@dataclass
class snake_case ( __UpperCAmelCase ):
    """Training arguments extended with a base learning rate that is scaled
    by the total batch size (absolute_lr = base_lr * total_batch_size / 256)."""
    snake_case__ = field(
        default=1E-3 , metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."} )
def a_ ( lowerCamelCase ):
    """Collate a list of feature dicts into a single batched tensor dict.

    ``lowerCamelCase`` is a list of examples each holding a ``pixel_values``
    tensor; the tensors are stacked along a new leading batch dimension.
    """
    # Fix: the stacked tensor was never bound to the name returned below.
    pixel_values = torch.stack([example['pixel_values'] for example in lowerCamelCase] )
    return {"pixel_values": pixel_values}
def a_ ( ):
    """Main entry point: MAE-style masked-image-modeling pretraining (ViTMAE).

    Parses arguments, sets up logging, loads the dataset, builds config /
    image processor / model, applies train transforms and runs the HF
    ``Trainer`` (train, eval, model card / hub push).

    NOTE(review): an automated rename has collapsed most local bindings into
    the single name ``UpperCAmelCase__`` while later statements still read the
    original descriptive names (``parser``, ``training_args``, ``model_args``,
    ``data_args``, ``ds``, ``split``, ``config``, ``image_processor``,
    ``column_names``, ``transforms``, ``last_checkpoint``,
    ``total_train_batch_size``, ``trainer``, ``train_result``). As shown this
    function raises NameError at runtime; the structure below documents the
    evident intent — confirm against the upstream run_mae example script.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    UpperCAmelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_mae' , lowerCamelCase , lowerCamelCase )
    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    UpperCAmelCase__ = training_args.get_process_log_level()
    logger.setLevel(lowerCamelCase )
    transformers.utils.logging.set_verbosity(lowerCamelCase )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    # NOTE(review): `fpaa` below is presumably a mangled `fp16` — verify.
    logger.warning(
        f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
    logger.info(f'''Training/evaluation parameters {training_args}''' )
    # Detecting last checkpoint.
    UpperCAmelCase__ = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        UpperCAmelCase__ = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
                'Use --overwrite_output_dir to overcome.' )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
                'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
    # Initialize our dataset.
    UpperCAmelCase__ = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    # If we don't have a validation split, split off a percentage of train as validation.
    UpperCAmelCase__ = None if 'validation' in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , lowerCamelCase ) and data_args.train_val_split > 0.0:
        UpperCAmelCase__ = ds['train'].train_test_split(data_args.train_val_split )
        UpperCAmelCase__ = split['train']
        UpperCAmelCase__ = split['test']
    # Load pretrained model and image processor
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    UpperCAmelCase__ = {
        'cache_dir': model_args.cache_dir,
        'revision': model_args.model_revision,
        'use_auth_token': True if model_args.use_auth_token else None,
    }
    # Resolve the model config: explicit config name > checkpoint path > fresh default.
    if model_args.config_name:
        UpperCAmelCase__ = ViTMAEConfig.from_pretrained(model_args.config_name , **lowerCamelCase )
    elif model_args.model_name_or_path:
        UpperCAmelCase__ = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **lowerCamelCase )
    else:
        UpperCAmelCase__ = ViTMAEConfig()
        logger.warning('You are instantiating a new config instance from scratch.' )
        if model_args.config_overrides is not None:
            logger.info(f'''Overriding config: {model_args.config_overrides}''' )
            config.update_from_string(model_args.config_overrides )
            logger.info(f'''New config: {config}''' )
    # adapt config
    config.update(
        {
            'mask_ratio': model_args.mask_ratio,
            'norm_pix_loss': model_args.norm_pix_loss,
        } )
    # create image processor
    if model_args.image_processor_name:
        UpperCAmelCase__ = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **lowerCamelCase )
    elif model_args.model_name_or_path:
        UpperCAmelCase__ = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **lowerCamelCase )
    else:
        UpperCAmelCase__ = ViTImageProcessor()
    # create model
    if model_args.model_name_or_path:
        UpperCAmelCase__ = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info('Training new model from scratch' )
        UpperCAmelCase__ = ViTMAEForPreTraining(lowerCamelCase )
    # Pick the column that holds the images (explicit flag > "image" > "img" > first column).
    if training_args.do_train:
        UpperCAmelCase__ = ds['train'].column_names
    else:
        UpperCAmelCase__ = ds['validation'].column_names
    if data_args.image_column_name is not None:
        UpperCAmelCase__ = data_args.image_column_name
    elif "image" in column_names:
        UpperCAmelCase__ = 'image'
    elif "img" in column_names:
        UpperCAmelCase__ = 'img'
    else:
        UpperCAmelCase__ = column_names[0]
    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        UpperCAmelCase__ = image_processor.size['shortest_edge']
    else:
        UpperCAmelCase__ = (image_processor.size['height'], image_processor.size['width'])
    UpperCAmelCase__ = Compose(
        [
            Lambda(lambda lowerCamelCase : img.convert('RGB' ) if img.mode != "RGB" else img ),
            RandomResizedCrop(lowerCamelCase , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
        ] )
    # NOTE(review): this helper also reads undefined names (`transforms`,
    # `examples`) after the automated rename — confirm against upstream.
    def preprocess_images(lowerCamelCase ):
        UpperCAmelCase__ = [transforms(lowerCamelCase ) for image in examples[image_column_name]]
        return examples
    if training_args.do_train:
        if "train" not in ds:
            raise ValueError('--do_train requires a train dataset' )
        if data_args.max_train_samples is not None:
            UpperCAmelCase__ = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
        # Set the training transforms
        ds["train"].set_transform(lowerCamelCase )
    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError('--do_eval requires a validation dataset' )
        if data_args.max_eval_samples is not None:
            UpperCAmelCase__ = (
                ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        ds["validation"].set_transform(lowerCamelCase )
    # Compute absolute learning rate
    UpperCAmelCase__ = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        UpperCAmelCase__ = training_args.base_learning_rate * total_train_batch_size / 2_5_6
    # Initialize our trainer
    UpperCAmelCase__ = Trainer(
        model=lowerCamelCase , args=lowerCamelCase , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=lowerCamelCase , data_collator=lowerCamelCase , )
    # Training
    if training_args.do_train:
        UpperCAmelCase__ = None
        if training_args.resume_from_checkpoint is not None:
            UpperCAmelCase__ = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            UpperCAmelCase__ = last_checkpoint
        UpperCAmelCase__ = trainer.train(resume_from_checkpoint=lowerCamelCase )
        trainer.save_model()
        trainer.log_metrics('train' , train_result.metrics )
        trainer.save_metrics('train' , train_result.metrics )
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        UpperCAmelCase__ = trainer.evaluate()
        trainer.log_metrics('eval' , lowerCamelCase )
        trainer.save_metrics('eval' , lowerCamelCase )
    # Write model card and (optionally) push to hub
    UpperCAmelCase__ = {
        'tasks': 'masked-auto-encoding',
        'dataset': data_args.dataset_name,
        'tags': ['masked-auto-encoding'],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**lowerCamelCase )
    else:
        trainer.create_model_card(**lowerCamelCase )
def a_ ( lowerCamelCase ):
    """Spawn-compatible entry point for TPU training (the index arg is ignored).

    NOTE(review): ``main`` is not defined in this file as shown — the training
    entry point above was also renamed to ``a_`` by an automated pass, so this
    call raises NameError. Confirm the intended target.
    """
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
    # NOTE(review): `main` is undefined in this file as shown (the script entry
    # point was renamed to `a_` by an automated pass); as written this raises
    # NameError. Confirm the intended entry point.
    main()
| 98 | """simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class snake_case ( __UpperCAmelCase , unittest.TestCase ):
    """Fast tests for ShapEImgaImgPipeline built from tiny dummy components.

    NOTE(review): an automated rename gave every method/property the name
    ``__lowerCAmelCase`` while later code reads the original descriptive names
    (``self.text_embedder_hidden_size``, ``self.time_input_dim``,
    ``self.renderer_dim``, ``self.dummy_prior`` ...). As shown, same-named
    definitions shadow each other and those attribute reads fail; the comments
    below describe the evident intent of each member — confirm against the
    upstream diffusers test module.
    """
    # Pipeline class under test and its batch/call parameter surface.
    snake_case__ = ShapEImgaImgPipeline
    snake_case__ = ["image"]
    snake_case__ = ["image"]
    snake_case__ = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    snake_case__ = False
    # Presumably the text-embedder hidden size used by the dummy components.
    @property
    def __lowerCAmelCase ( self : List[str] ):
        return 32
    # Presumably the prior's time-input dimension.
    @property
    def __lowerCAmelCase ( self : str ):
        return 32
    # Time-embedding dimension derived from the time-input dimension.
    @property
    def __lowerCAmelCase ( self : int ):
        return self.time_input_dim * 4
    # Presumably the renderer's hidden dimension.
    @property
    def __lowerCAmelCase ( self : List[Any] ):
        return 8
    # Builds a tiny, seeded CLIP vision model to act as the image encoder.
    @property
    def __lowerCAmelCase ( self : Optional[int] ):
        torch.manual_seed(0 )
        UpperCAmelCase__ = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size ,image_size=64 ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=37 ,num_attention_heads=4 ,num_channels=3 ,num_hidden_layers=5 ,patch_size=1 ,)
        UpperCAmelCase__ = CLIPVisionModel(lowerCamelCase__ )
        return model
    # Builds the CLIP image processor paired with the dummy encoder.
    # NOTE(review): the boolean/return bindings here read names that the rename
    # broke (`lowerCamelCase__` as a value, `image_processor` as a return).
    @property
    def __lowerCAmelCase ( self : Optional[Any] ):
        UpperCAmelCase__ = CLIPImageProcessor(
            crop_size=224 ,do_center_crop=lowerCamelCase__ ,do_normalize=lowerCamelCase__ ,do_resize=lowerCamelCase__ ,image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] ,image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] ,resample=3 ,size=224 ,)
        return image_processor
    # Builds a tiny, seeded PriorTransformer.
    @property
    def __lowerCAmelCase ( self : str ):
        torch.manual_seed(0 )
        UpperCAmelCase__ = {
            'num_attention_heads': 2,
            'attention_head_dim': 16,
            'embedding_dim': self.time_input_dim,
            'num_embeddings': 32,
            'embedding_proj_dim': self.text_embedder_hidden_size,
            'time_embed_dim': self.time_embed_dim,
            'num_layers': 1,
            'clip_embed_dim': self.time_input_dim * 2,
            'additional_embeddings': 0,
            'time_embed_act_fn': 'gelu',
            'norm_in_type': 'layer',
            'embedding_proj_norm_type': 'layer',
            'encoder_hid_proj_type': None,
            'added_emb_type': None,
        }
        UpperCAmelCase__ = PriorTransformer(**lowerCamelCase__ )
        return model
    # Builds a tiny, seeded ShapERenderer.
    @property
    def __lowerCAmelCase ( self : Tuple ):
        torch.manual_seed(0 )
        UpperCAmelCase__ = {
            'param_shapes': (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            'd_latent': self.time_input_dim,
            'd_hidden': self.renderer_dim,
            'n_output': 12,
            'background': (
                0.1,
                0.1,
                0.1,
            ),
        }
        UpperCAmelCase__ = ShapERenderer(**lowerCamelCase__ )
        return model
    # Assembles all dummy components plus a Heun scheduler into the pipeline's kwargs.
    def __lowerCAmelCase ( self : Any ):
        UpperCAmelCase__ = self.dummy_prior
        UpperCAmelCase__ = self.dummy_image_encoder
        UpperCAmelCase__ = self.dummy_image_processor
        UpperCAmelCase__ = self.dummy_renderer
        UpperCAmelCase__ = HeunDiscreteScheduler(
            beta_schedule='exp' ,num_train_timesteps=1_024 ,prediction_type='sample' ,use_karras_sigmas=lowerCamelCase__ ,clip_sample=lowerCamelCase__ ,clip_sample_range=1.0 ,)
        UpperCAmelCase__ = {
            'prior': prior,
            'image_encoder': image_encoder,
            'image_processor': image_processor,
            'renderer': renderer,
            'scheduler': scheduler,
        }
        return components
    # Builds deterministic dummy call inputs (random 64x64 image + seeded generator).
    # NOTE(review): both parameters share the mangled name `lowerCamelCase__`,
    # which is a SyntaxError in Python — presumably (device, seed=0) originally.
    def __lowerCAmelCase ( self : Optional[int] ,lowerCamelCase__ : Any ,lowerCamelCase__ : str=0 ):
        UpperCAmelCase__ = floats_tensor((1, 3, 64, 64) ,rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
        if str(lowerCamelCase__ ).startswith('mps' ):
            UpperCAmelCase__ = torch.manual_seed(lowerCamelCase__ )
        else:
            UpperCAmelCase__ = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
        UpperCAmelCase__ = {
            'image': input_image,
            'generator': generator,
            'num_inference_steps': 1,
            'frame_size': 32,
            'output_type': 'np',
        }
        return inputs
    # Smoke test: one inference step on CPU, checks output shape and a pixel slice.
    def __lowerCAmelCase ( self : Optional[int] ):
        UpperCAmelCase__ = 'cpu'
        UpperCAmelCase__ = self.get_dummy_components()
        UpperCAmelCase__ = self.pipeline_class(**lowerCamelCase__ )
        UpperCAmelCase__ = pipe.to(lowerCamelCase__ )
        pipe.set_progress_bar_config(disable=lowerCamelCase__ )
        UpperCAmelCase__ = pipe(**self.get_dummy_inputs(lowerCamelCase__ ) )
        UpperCAmelCase__ = output.images[0]
        UpperCAmelCase__ = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        UpperCAmelCase__ = np.array(
            [
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
            ] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    # Batch-consistency check, limited to small batches to avoid timeouts.
    def __lowerCAmelCase ( self : Tuple ):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2] )
    # Single-vs-batched inference must agree (relaxed tolerance on CPU).
    def __lowerCAmelCase ( self : Tuple ):
        UpperCAmelCase__ = torch_device == 'cpu'
        UpperCAmelCase__ = True
        self._test_inference_batch_single_identical(
            batch_size=2 ,test_max_difference=lowerCamelCase__ ,relax_max_difference=lowerCamelCase__ ,)
    # num_images_per_prompt must multiply the number of returned images.
    def __lowerCAmelCase ( self : List[Any] ):
        UpperCAmelCase__ = self.get_dummy_components()
        UpperCAmelCase__ = self.pipeline_class(**lowerCamelCase__ )
        UpperCAmelCase__ = pipe.to(lowerCamelCase__ )
        pipe.set_progress_bar_config(disable=lowerCamelCase__ )
        UpperCAmelCase__ = 1
        UpperCAmelCase__ = 2
        UpperCAmelCase__ = self.get_dummy_inputs(lowerCamelCase__ )
        for key in inputs.keys():
            if key in self.batch_params:
                UpperCAmelCase__ = batch_size * [inputs[key]]
        UpperCAmelCase__ = pipe(**lowerCamelCase__ ,num_images_per_prompt=lowerCamelCase__ )[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase ):
    """Slow integration test: runs the real openai/shap-e-img2img pipeline on GPU
    and compares the output against a stored reference rendering.

    NOTE(review): both methods below share the mangled name
    ``__lowerCAmelCase`` (presumably ``tearDown`` and the actual test), and the
    bodies read names the rename broke (``pipe``, ``images``,
    ``lowerCamelCase__``) — confirm against the upstream diffusers test module.
    """
    # Presumably tearDown: free VRAM between tests.
    def __lowerCAmelCase ( self : int ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    # End-to-end: corgi image -> 20 rendered 64x64 frames, seeded for determinism,
    # compared to the stored .npy reference by mean pixel difference.
    def __lowerCAmelCase ( self : Any ):
        UpperCAmelCase__ = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png' )
        UpperCAmelCase__ = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/shap_e/test_shap_e_img2img_out.npy' )
        UpperCAmelCase__ = ShapEImgaImgPipeline.from_pretrained('openai/shap-e-img2img' )
        UpperCAmelCase__ = pipe.to(lowerCamelCase__ )
        pipe.set_progress_bar_config(disable=lowerCamelCase__ )
        UpperCAmelCase__ = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
        UpperCAmelCase__ = pipe(
            lowerCamelCase__ ,generator=lowerCamelCase__ ,guidance_scale=3.0 ,num_inference_steps=64 ,frame_size=64 ,output_type='np' ,).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(lowerCamelCase__ ,lowerCamelCase__ )
| 98 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.