"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192,
        depths=(2, 2, 18, 2),
        num_heads=(6, 12, 24, 48),
        window_size=12,
        out_features=["stage2", "stage3", "stage4"],
    )

    config = DetaConfig(
        backbone_config=backbone_config,
        num_queries=900,
        encoder_ffn_dim=2048,
        decoder_ffn_dim=2048,
        num_feature_levels=5,
        assign_first_stage=True,
        with_box_refine=True,
        two_stage=True,
    )

    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"

    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') )
rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.reduction.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.bias''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') )
rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') )
rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') )
rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') )
rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') )
rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', f'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', f'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', f'''model.encoder.layers.{i}.self_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', f'''model.encoder.layers.{i}.self_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', f'''model.encoder.layers.{i}.self_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', f'''model.encoder.layers.{i}.self_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.weight''', f'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''model.encoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''model.encoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''model.encoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''model.encoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''model.encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''model.encoder.layers.{i}.final_layer_norm.bias''') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.weight''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''model.decoder.layers.{i}.self_attn.out_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''model.decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.weight''', f'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.bias''', f'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''model.decoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''model.decoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''model.decoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''model.decoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''model.decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''model.decoder.layers.{i}.final_layer_norm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, src, dest):
    val = dct.pop(src)
    dct[dest] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
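# Illustration only (not part of the original conversion script): how a fused
# qkv projection of shape (3*dim, dim) splits into query/key/value rows,
# mirroring the slicing used above. Names here are hypothetical.
def _split_qkv_example(qkv_weight, dim):
    query = qkv_weight[:dim, :]  # first dim rows
    key = qkv_weight[dim : dim * 2, :]  # middle dim rows
    value = qkv_weight[-dim:, :]  # last dim rows
    return query, key, value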
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    config = get_deta_config(model_name)

    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f"Model name {model_name} not supported")

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # original state dict
    for name, param in state_dict.items():
        print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val

    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    # load image processor
    processor = DetaImageProcessor(format="coco_detection")

    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))

    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]]
        )
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]]
        )
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")

    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f"jozhang97/{model_name}")
        processor.push_to_hub(f"jozhang97/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
type=str,
default="deta-swin-large",
choices=["deta-swin-large", "deta-swin-large-o365"],
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
help="Path to the folder to output PyTorch model.",
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s, old, new, occurrence):
    li = s.rsplit(old, occurrence)
    return new.join(li)
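# A minimal usage sketch (hypothetical key): only the last occurrence of ".w"
# is replaced, which is why rreplace is used below instead of str.replace.
# rreplace("blocks.w.proj.w", ".w", ".weight", 1) -> "blocks.w.proj.weight"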
def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
def upgrade_state_dict(state_dict):
    upgrade = {}
    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")
        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")
        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)
        upgrade[key] = value.float()
    return upgrade
@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()

    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
"""simple docstring"""
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
    classifier = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
    )
    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation="relu"))
    classifier.add(layers.Dense(units=1, activation="sigmoid"))
# Compiling the CNN
classifier.compile(
optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    # fit accepts generators directly in modern TensorFlow (fit_generator is deprecated)
    classifier.fit(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )
classifier.save("cnn.h5")
# Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
# training_set.class_indices
    if result[0][0] == 0:
        prediction = "Normal"
    if result[0][0] == 1:
        prediction = "Abnormality detected"
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFDeiTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_tf_common.py, as DeiT does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDeiTModel,
            "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    # special case for DeiTForImageClassificationWithTeacher model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]

        return inputs_dict

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-1.0266, 0.1912, -1.2861])

        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
"""simple docstring"""
import re
def dna(dna: str) -> str:
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")
    return dna.translate(dna.maketrans("ATCG", "TAGC"))
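# Quick sanity check (illustration only): each base maps to its complement.
# dna("ATCGGGT") -> "TAGCCCA"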
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
def add(first, second):
    while second != 0:
        # carry holds the common set bits of first and second
        carry = first & second
        first ^= second
        second = carry << 1
    return first
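# Worked example (illustration): add(5, 3)
#   carry = 5 & 3 = 1, first = 5 ^ 3 = 6, second = 1 << 1 = 2
#   carry = 6 & 2 = 2, first = 6 ^ 2 = 4, second = 2 << 1 = 4
#   carry = 4 & 4 = 4, first = 4 ^ 4 = 0, second = 4 << 1 = 8
#   carry = 0 & 8 = 0, first = 0 ^ 8 = 8, second = 0  -> returns 8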
if __name__ == "__main__":
import doctest
doctest.testmod()
    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
print(f"{add(first, second) = }")
"""simple docstring"""
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints
from transformers import (
AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)
def get_flax_param(t5x_checkpoint_path):
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params
def rename_and_convert_flax_params(flax_dict):
    converted_dict = {}
    CONVERSION_MAPPING = {
'token_embedder': 'embeddings',
'encoder_norm': 'layernorm',
'kernel': 'weight',
'.out': '.output',
'scale': 'weight',
'embedders_0.pos_embedding': 'row_embedder.weight',
'embedders_1.pos_embedding': 'column_embedder.weight',
}
    DECODER_CONVERSION_MAPPING = {
'query': 'attention.query',
'key': 'attention.key',
'value': 'attention.value',
'output.dense': 'output',
'encoder_decoder_attention.o': 'encoder_decoder_attention.attention.o',
'pre_self_attention_layer_norm': 'self_attention.layer_norm',
'pre_cross_attention_layer_norm': 'encoder_decoder_attention.layer_norm',
'mlp.': 'mlp.DenseReluDense.',
'pre_mlp_layer_norm': 'mlp.layer_norm',
'self_attention.o': 'self_attention.attention.o',
'decoder.embeddings.embedding': 'decoder.embed_tokens.weight',
'decoder.relpos_bias.rel_embedding': 'decoder.layer.0.self_attention.attention.relative_attention_bias.weight',
'decoder.decoder_norm.weight': 'decoder.final_layer_norm.weight',
'decoder.logits_dense.weight': 'decoder.lm_head.weight',
}
    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict
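# A minimal sketch of the layer renaming above (hypothetical key, not from a
# real checkpoint): the T5x "layers_<n>" segment becomes "layer.<n>".
# re.sub(r"layers_(\d+)", r"layer.\1", "encoder.layers_3.mlp.wi") -> "encoder.layer.3.mlp.wi"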
def convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = Pix2StructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        processor.image_processor.max_patches = 4096
        processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--use_large", action="store_true", help="Use large model.")
parser.add_argument("--is_vqa", action="store_true", help="Use large model.")
    args = parser.parse_args()
    convert_pix2struct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
A_ : List[str] = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Dict = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
def least_divisible_repunit(divisor: int) -> int:
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index
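# Worked example (illustration): least_divisible_repunit(7) returns 6, since
# R(6) = 111111 = 7 * 15873 is the shortest repunit divisible by 7. The loop
# tracks repunits modulo the divisor: 1, 4, 6, 5, 2, 0 for divisor 7.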
def solution(limit: int = 1000000) -> int:
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(f"{solution() = }")
"""simple docstring"""
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    return (2 / (1 + np.exp(-2 * vector))) - 1
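# Sanity check (illustration): the exponential form above is algebraically
# equal to np.tanh, since tanh(x) = 2 / (1 + exp(-2x)) - 1.
# v = np.array([-1.0, 0.0, 1.0])
# np.allclose(tangent_hyperbolic(v), np.tanh(v))  # -> True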
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
def text_justification(word: str, max_width: int) -> list:
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in the line
            # just insert overall_spaces_count for the remainder of the line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list[str] = []
    width = 0
    for inner_word in words:
        if width + len(inner_word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(inner_word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(inner_word)
            width += len(inner_word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [inner_word], len(inner_word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
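# Example (illustration): greedy packing plus round-robin space distribution.
# text_justification("This is an example of text justification.", 16)
#   -> ['This    is    an', 'example  of text', 'justification.  ']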
if __name__ == "__main__":
from doctest import testmod
testmod()
"""simple docstring"""
print((lambda quine: quine % quine)("print((lambda quine: quine %% quine)(%r))"))
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 2540529) < 10
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Union[str, Any] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : int = ["CLIPFeatureExtractor"]
A_ : Any = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size=1,
        generator=None,
        eta=0.0,
        num_inference_steps=50,
        use_clipped_model_output=None,
        output_type="pil",
        return_dict=True,
    ):
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
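# A minimal usage sketch (assumption: a compatible UNet checkpoint such as
# "google/ddpm-cifar10-32" is available; illustration only, not part of this module):
# from diffusers import UNet2DModel
# unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
# pipe = DDIMPipeline(unet=unet, scheduler=DDIMScheduler())
# images = pipe(batch_size=1, num_inference_steps=50).images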
"""simple docstring"""
import os
import posixpath
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class a_ ( datasets.BuilderConfig ):
'''simple docstring'''
lowerCamelCase__ : Optional[datasets.Features] = None
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , ):
import pyspark
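# Builds a generator that walks the DataFrame one Spark partition at a time, in the
# caller-supplied order, yielding (unique example id, row-as-dict) pairs.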
def generate_fn():
lowerCamelCase__ : Optional[Any] = df.select('*' , pyspark.sql.functions.spark_partition_id().alias('part_id' ) )
for partition_id in partition_order:
lowerCamelCase__ : Dict = df_with_partition_id.select('*' ).where(f'''part_id = {partition_id}''' ).drop('part_id' )
lowerCamelCase__ : Dict = partition_df.collect()
lowerCamelCase__ : int = 0
for row in rows:
yield f'''{partition_id}_{row_id}''', row.asDict()
row_id += 1
return generate_fn
class a_ ( _BaseExamplesIterable ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=None, ):
'''simple docstring'''
lowerCamelCase__ : Tuple = df
lowerCamelCase__ : Any = partition_order or range(self.df.rdd.getNumPartitions() )
lowerCamelCase__ : List[Any] = _generate_iterable_examples(self.df, self.partition_order )
def __iter__(self ):
'''simple docstring'''
yield from self.generate_examples_fn()
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(lowerCamelCase_ )
return SparkExamplesIterable(self.df, partition_order=lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.split_shard_indices_by_worker(lowerCamelCase_, lowerCamelCase_ )
return SparkExamplesIterable(self.df, partition_order=lowerCamelCase_ )
@property
def a__ (self ):
'''simple docstring'''
return len(self.partition_order )
class a_ ( datasets.DatasetBuilder ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = SparkConfig
def __init__(self, lowerCamelCase_, lowerCamelCase_ = None, lowerCamelCase_ = None, **lowerCamelCase_, ):
'''simple docstring'''
import pyspark
lowerCamelCase__ : str = pyspark.sql.SparkSession.builder.getOrCreate()
lowerCamelCase__ : Optional[Any] = df
lowerCamelCase__ : Dict = working_dir
super().__init__(
cache_dir=lowerCamelCase_, config_name=str(self.df.semanticHash() ), **lowerCamelCase_, )
def a__ (self ):
'''simple docstring'''
def create_cache_and_write_probe(lowerCamelCase_ ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir, exist_ok=True )
probe_file = os.path.join(self._cache_dir, 'fs_test' + uuid.uuid4().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(probe_file, 'a' )
return [probe_file]
if self._spark.conf.get('spark.master', '' ).startswith('local' ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
probe = (
self._spark.sparkContext.parallelize(range(1 ), 1 ).mapPartitions(create_cache_and_write_probe ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
'When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir' )
def a__ (self ):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
import pyspark
def get_arrow_batch_size(lowerCamelCase_ ):
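# Report the in-memory Arrow size of every batch so the driver can average bytes per row.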
for batch in lowerCamelCase_:
yield pa.RecordBatch.from_pydict({'batch_bytes': [batch.nbytes]} )
lowerCamelCase__ : List[Any] = self.df.count()
lowerCamelCase__ : List[Any] = df_num_rows if df_num_rows <= 1_0_0 else 1_0_0
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
lowerCamelCase__ : List[Any] = (
self.df.limit(lowerCamelCase_ )
.repartition(1 )
.mapInArrow(lowerCamelCase_, 'batch_bytes: long' )
.agg(pyspark.sql.functions.sum('batch_bytes' ).alias('sample_bytes' ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
lowerCamelCase__ : Dict = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
lowerCamelCase__ : str = min(lowerCamelCase_, int(approx_total_size / max_shard_size ) )
lowerCamelCase__ : List[Any] = self.df.repartition(lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, ):
'''simple docstring'''
import pyspark
lowerCamelCase__ : List[str] = ParquetWriter if file_format == 'parquet' else ArrowWriter
lowerCamelCase__ : List[str] = os.path.join(self._working_dir, os.path.basename(lowerCamelCase_ ) ) if self._working_dir else fpath
lowerCamelCase__ : Optional[int] = file_format == 'parquet'
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
lowerCamelCase__ : int = self.config.features
lowerCamelCase__ : Dict = self._writer_batch_size
lowerCamelCase__ : Optional[Any] = self._fs.storage_options
def write_arrow(lowerCamelCase_ ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
lowerCamelCase__ : Any = pyspark.TaskContext().taskAttemptId()
first_batch = next(lowerCamelCase_, None )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]], names=['task_id', 'num_examples', 'num_bytes'], )
lowerCamelCase__ : Tuple = 0
lowerCamelCase__ : Any = writer_class(
features=lowerCamelCase_, path=working_fpath.replace('SSSSS', f'''{shard_id:05d}''' ).replace('TTTTT', f'''{task_id:05d}''' ), writer_batch_size=lowerCamelCase_, storage_options=lowerCamelCase_, embed_local_files=lowerCamelCase_, )
lowerCamelCase__ : List[str] = pa.Table.from_batches([first_batch] )
writer.write_table(lowerCamelCase_ )
for batch in lowerCamelCase_:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
lowerCamelCase__ , lowerCamelCase__ : Tuple = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]], names=['task_id', 'num_examples', 'num_bytes'], )
shard_id += 1
lowerCamelCase__ : Dict = writer_class(
features=writer._features, path=working_fpath.replace('SSSSS', f'''{shard_id:05d}''' ).replace('TTTTT', f'''{task_id:05d}''' ), writer_batch_size=lowerCamelCase_, storage_options=lowerCamelCase_, embed_local_files=lowerCamelCase_, )
lowerCamelCase__ : Tuple = pa.Table.from_batches([batch] )
writer.write_table(lowerCamelCase_ )
if writer._num_bytes > 0:
lowerCamelCase__ , lowerCamelCase__ : Tuple = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]], names=['task_id', 'num_examples', 'num_bytes'], )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(lowerCamelCase_ ) ):
lowerCamelCase__ : Optional[int] = os.path.join(os.path.dirname(lowerCamelCase_ ), os.path.basename(lowerCamelCase_ ) )
shutil.move(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : List[str] = (
self.df.mapInArrow(lowerCamelCase_, 'task_id: long, num_examples: long, num_bytes: long' )
.groupBy('task_id' )
.agg(
pyspark.sql.functions.sum('num_examples' ).alias('total_num_examples' ), pyspark.sql.functions.sum('num_bytes' ).alias('total_num_bytes' ), pyspark.sql.functions.count('num_bytes' ).alias('num_shards' ), pyspark.sql.functions.collect_list('num_examples' ).alias('shard_lengths' ), )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def a__ (self, lowerCamelCase_, lowerCamelCase_ = "arrow", lowerCamelCase_ = None, lowerCamelCase_ = None, **lowerCamelCase_, ):
'''simple docstring'''
self._validate_cache_dir()
lowerCamelCase__ : Union[str, Any] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(lowerCamelCase_ )
lowerCamelCase__ : str = not is_remote_filesystem(self._fs )
lowerCamelCase__ : Any = os.path.join if is_local else posixpath.join
lowerCamelCase__ : Any = '-TTTTT-SSSSS-of-NNNNN'
lowerCamelCase__ : Tuple = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
lowerCamelCase__ : Union[str, Any] = path_join(self._output_dir, lowerCamelCase_ )
lowerCamelCase__ : List[str] = 0
lowerCamelCase__ : Dict = 0
lowerCamelCase__ : List[Any] = 0
lowerCamelCase__ : Optional[Any] = []
lowerCamelCase__ : List[str] = []
for task_id, content in self._prepare_split_single(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
(
(
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) ,
) : int = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(lowerCamelCase_ )
lowerCamelCase__ : str = total_num_examples
lowerCamelCase__ : int = total_num_bytes
# should rename everything at the end
logger.debug(f'''Renaming {total_shards} shards.''' )
if total_shards > 1:
lowerCamelCase__ : Union[str, Any] = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
lowerCamelCase__ : Optional[Any] = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, ):
rename(
lowerCamelCase_, fpath.replace('SSSSS', f'''{shard_id:05d}''' ).replace('TTTTT', f'''{task_id:05d}''' ), fpath.replace('TTTTT-SSSSS', f'''{global_shard_id:05d}''' ).replace('NNNNN', f'''{total_shards:05d}''' ), )
lowerCamelCase__ : List[str] = []
lowerCamelCase__ : List[str] = 0
for i in range(len(lowerCamelCase_ ) ):
lowerCamelCase__ , lowerCamelCase__ : Any = task_id_and_num_shards[i]
for shard_id in range(lowerCamelCase_ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(lowerCamelCase_, len(lowerCamelCase_ ) ).map(lambda lowerCamelCase_ : _rename_shard(*lowerCamelCase_ ) ).collect()
else:
# don't use any pattern
lowerCamelCase__ : List[Any] = 0
lowerCamelCase__ : Dict = task_id_and_num_shards[0][0]
self._rename(
fpath.replace('SSSSS', f'''{shard_id:05d}''' ).replace('TTTTT', f'''{task_id:05d}''' ), fpath.replace(lowerCamelCase_, '' ), )
def a__ (self, lowerCamelCase_, ):
'''simple docstring'''
return SparkExamplesIterable(self.df )
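# Hedged usage sketch, not part of the original file (assumes an active SparkSession and a
# pyspark DataFrame `df`; `datasets.Dataset.from_spark(df)` drives a builder like this one;
# the argument names are illustrative):
#     builder = a_(df, cache_dir="/nfs/shared/cache")
#     builder.download_and_prepare()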
| 316
| 0
|
"""simple docstring"""
from ....utils import logging
A_ : List[str] = logging.get_logger(__name__)
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=None, lowerCamelCase_=2_0_4_8 ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = config.__dict__
lowerCamelCase__ : str = modal_hidden_size
if num_labels:
lowerCamelCase__ : List[str] = num_labels
| 353
|
"""simple docstring"""
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_ ):
'''simple docstring'''
len_array = len(lowerCamelCase_ )
self.prefix_sum = [0] * len_array
if len_array > 0:
self.prefix_sum[0] = lowerCamelCase_[0]
for i in range(1, len_array ):
self.prefix_sum[i] = self.prefix_sum[i - 1] + lowerCamelCase_[i]
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
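# A subarray summing to the target exists iff prefix_sum[j] - prefix_sum[i] equals it for
# some i < j (the initial {0} covers the empty prefix), so keep every prefix sum seen so
# far in a set and test each new one against it.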
sums = {0}
for sum_item in self.prefix_sum:
if sum_item - lowerCamelCase_ in sums:
return True
sums.add(sum_item )
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316
| 0
|
"""simple docstring"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z ):
return 1 / (1 + np.exp(-z ))
def cost_function(h , y ):
return (-y * np.log(h ) - (1 - y) * np.log(1 - h )).mean()
def log_likelihood(x , y , weights ):
scores = np.dot(x , weights )
return np.sum(y * scores - np.log(1 + np.exp(scores ) ) )
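# Batch gradient descent on the logistic loss: theta <- theta - alpha * x.T @ (sigmoid(x @ theta) - y) / n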
def logistic_reg(alpha , x , y , max_iterations=7_0000 ):
theta = np.zeros(x.shape[1] )
for iterations in range(max_iterations ):
z = np.dot(x , theta )
h = sigmoid_function(z )
gradient = np.dot(x.T , h - y ) / y.size
theta = theta - alpha * gradient # updating the weights
z = np.dot(x , theta )
h = sigmoid_function(z )
j = cost_function(h , y )
if iterations % 100 == 0:
print(f'''loss: {j} \t''' ) # printing the loss after every 100 iterations
return theta
# In[68]:
if __name__ == "__main__":
iris = datasets.load_iris()
x = iris.data[:, :2]
y = (iris.target != 0) * 1
alpha = 0.1
theta = logistic_reg(alpha, x, y, max_iterations=7_00_00)
print("theta: ", theta) # printing the theta i.e our weights vector
def predict_prob(x ):
return sigmoid_function(
np.dot(x , theta ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
(xa_min, xa_max) = (x[:, 0].min(), x[:, 0].max())
(xb_min, xb_max) = (x[:, 1].min(), x[:, 1].max())
(xxa, xxb) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xb_min, xb_max))
grid = np.c_[xxa.ravel(), xxb.ravel()]
probs = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxb, probs, [0.5], linewidths=1, colors="black")
plt.legend()
plt.show()
| 354
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['image_processor', 'tokenizer']
lowerCamelCase__ : Optional[int] = 'CLIPImageProcessor'
lowerCamelCase__ : List[str] = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
def __init__(self, lowerCamelCase_=None, lowerCamelCase_=None, **lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.', lowerCamelCase_, )
lowerCamelCase__ : int = kwargs.pop('feature_extractor' )
lowerCamelCase__ : str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(lowerCamelCase_, lowerCamelCase_ )
def __call__(self, lowerCamelCase_=None, lowerCamelCase_=None, lowerCamelCase_=None, **lowerCamelCase_ ):
'''simple docstring'''
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
lowerCamelCase__ : Any = self.tokenizer(lowerCamelCase_, return_tensors=lowerCamelCase_, **lowerCamelCase_ )
if images is not None:
lowerCamelCase__ : List[Any] = self.image_processor(lowerCamelCase_, return_tensors=lowerCamelCase_, **lowerCamelCase_ )
if text is not None and images is not None:
lowerCamelCase__ : str = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCamelCase_ ), tensor_type=lowerCamelCase_ )
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase_, **lowerCamelCase_ )
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase_, **lowerCamelCase_ )
@property
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = self.tokenizer.model_input_names
lowerCamelCase__ : List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
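# Hedged usage sketch, not part of the original file (this processor pairs a CLIP image
# processor with an XLM-RoBERTa tokenizer; keyword names follow the upstream CLIP
# processor API and `image` is any PIL image):
#     enc = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#     # -> input_ids/attention_mask from the tokenizer plus pixel_values from the image processor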
| 316
| 0
|
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
A_ : List[str] = logging.get_logger(__name__)
A_ : Tuple = OrderedDict(
[
("audio-spectrogram-transformer", "ASTFeatureExtractor"),
("beit", "BeitFeatureExtractor"),
("chinese_clip", "ChineseCLIPFeatureExtractor"),
("clap", "ClapFeatureExtractor"),
("clip", "CLIPFeatureExtractor"),
("clipseg", "ViTFeatureExtractor"),
("conditional_detr", "ConditionalDetrFeatureExtractor"),
("convnext", "ConvNextFeatureExtractor"),
("cvt", "ConvNextFeatureExtractor"),
("data2vec-audio", "Wav2Vec2FeatureExtractor"),
("data2vec-vision", "BeitFeatureExtractor"),
("deformable_detr", "DeformableDetrFeatureExtractor"),
("deit", "DeiTFeatureExtractor"),
("detr", "DetrFeatureExtractor"),
("dinat", "ViTFeatureExtractor"),
("donut-swin", "DonutFeatureExtractor"),
("dpt", "DPTFeatureExtractor"),
("encodec", "EncodecFeatureExtractor"),
("flava", "FlavaFeatureExtractor"),
("glpn", "GLPNFeatureExtractor"),
("groupvit", "CLIPFeatureExtractor"),
("hubert", "Wav2Vec2FeatureExtractor"),
("imagegpt", "ImageGPTFeatureExtractor"),
("layoutlmv2", "LayoutLMv2FeatureExtractor"),
("layoutlmv3", "LayoutLMv3FeatureExtractor"),
("levit", "LevitFeatureExtractor"),
("maskformer", "MaskFormerFeatureExtractor"),
("mctct", "MCTCTFeatureExtractor"),
("mobilenet_v1", "MobileNetV1FeatureExtractor"),
("mobilenet_v2", "MobileNetV2FeatureExtractor"),
("mobilevit", "MobileViTFeatureExtractor"),
("nat", "ViTFeatureExtractor"),
("owlvit", "OwlViTFeatureExtractor"),
("perceiver", "PerceiverFeatureExtractor"),
("poolformer", "PoolFormerFeatureExtractor"),
("regnet", "ConvNextFeatureExtractor"),
("resnet", "ConvNextFeatureExtractor"),
("segformer", "SegformerFeatureExtractor"),
("sew", "Wav2Vec2FeatureExtractor"),
("sew-d", "Wav2Vec2FeatureExtractor"),
("speech_to_text", "Speech2TextFeatureExtractor"),
("speecht5", "SpeechT5FeatureExtractor"),
("swiftformer", "ViTFeatureExtractor"),
("swin", "ViTFeatureExtractor"),
("swinv2", "ViTFeatureExtractor"),
("table-transformer", "DetrFeatureExtractor"),
("timesformer", "VideoMAEFeatureExtractor"),
("tvlt", "TvltFeatureExtractor"),
("unispeech", "Wav2Vec2FeatureExtractor"),
("unispeech-sat", "Wav2Vec2FeatureExtractor"),
("van", "ConvNextFeatureExtractor"),
("videomae", "VideoMAEFeatureExtractor"),
("vilt", "ViltFeatureExtractor"),
("vit", "ViTFeatureExtractor"),
("vit_mae", "ViTFeatureExtractor"),
("vit_msn", "ViTFeatureExtractor"),
("wav2vec2", "Wav2Vec2FeatureExtractor"),
("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
("wavlm", "Wav2Vec2FeatureExtractor"),
("whisper", "WhisperFeatureExtractor"),
("xclip", "CLIPFeatureExtractor"),
("yolos", "YolosFeatureExtractor"),
]
)
A_ : Union[str, Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def lowerCamelCase_ ( _lowerCamelCase ):
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if _lowerCamelCase in extractors:
module_name = model_type_to_module_name(module_name )
module = importlib.import_module(f'''.{module_name}''' , 'transformers.models' )
try:
return getattr(module , _lowerCamelCase )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(extractor , '__name__' , None ) == _lowerCamelCase:
return extractor
# We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
main_module = importlib.import_module('transformers' )
if hasattr(main_module , _lowerCamelCase ):
return getattr(main_module , _lowerCamelCase )
return None
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , **_lowerCamelCase , ):
lowerCamelCase__ : int = get_file_from_repo(
_lowerCamelCase , _lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , resume_download=_lowerCamelCase , proxies=_lowerCamelCase , use_auth_token=_lowerCamelCase , revision=_lowerCamelCase , local_files_only=_lowerCamelCase , )
if resolved_config_file is None:
logger.info(
'Could not locate the feature extractor configuration file, will try to use the model config instead.' )
return {}
with open(_lowerCamelCase , encoding='utf-8' ) as reader:
return json.load(_lowerCamelCase )
class a_ :
'''simple docstring'''
def __init__(self ):
'''simple docstring'''
raise EnvironmentError(
'AutoFeatureExtractor is designed to be instantiated '
'using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.' )
@classmethod
@replace_list_option_in_docstrings(lowerCamelCase_ )
def a__ (cls, lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = kwargs.pop('config', lowerCamelCase_ )
lowerCamelCase__ : List[Any] = kwargs.pop('trust_remote_code', lowerCamelCase_ )
lowerCamelCase__ : Any = True
lowerCamelCase__ : str = FeatureExtractionMixin.get_feature_extractor_dict(lowerCamelCase_, **lowerCamelCase_ )
lowerCamelCase__ : str = config_dict.get('feature_extractor_type', lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = None
if "AutoFeatureExtractor" in config_dict.get('auto_map', {} ):
lowerCamelCase__ : Tuple = config_dict['auto_map']['AutoFeatureExtractor']
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(lowerCamelCase_, lowerCamelCase_ ):
lowerCamelCase__ : List[str] = AutoConfig.from_pretrained(lowerCamelCase_, **lowerCamelCase_ )
# It could be in `config.feature_extractor_type``
lowerCamelCase__ : List[Any] = getattr(lowerCamelCase_, 'feature_extractor_type', lowerCamelCase_ )
if hasattr(lowerCamelCase_, 'auto_map' ) and "AutoFeatureExtractor" in config.auto_map:
lowerCamelCase__ : List[Any] = config.auto_map['AutoFeatureExtractor']
if feature_extractor_class is not None:
lowerCamelCase__ : str = feature_extractor_class_from_name(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = feature_extractor_auto_map is not None
lowerCamelCase__ : List[str] = feature_extractor_class is not None or type(lowerCamelCase_ ) in FEATURE_EXTRACTOR_MAPPING
lowerCamelCase__ : Tuple = resolve_trust_remote_code(
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
if has_remote_code and trust_remote_code:
lowerCamelCase__ : str = get_class_from_dynamic_module(
lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = kwargs.pop('code_revision', lowerCamelCase_ )
if os.path.isdir(lowerCamelCase_ ):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(lowerCamelCase_, **lowerCamelCase_ )
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(lowerCamelCase_, **lowerCamelCase_ )
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(lowerCamelCase_ ) in FEATURE_EXTRACTOR_MAPPING:
lowerCamelCase__ : int = FEATURE_EXTRACTOR_MAPPING[type(lowerCamelCase_ )]
return feature_extractor_class.from_dict(lowerCamelCase_, **lowerCamelCase_ )
raise ValueError(
f'''Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a '''
f'''`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following '''
f'''`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}''' )
@staticmethod
def a__ (lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
FEATURE_EXTRACTOR_MAPPING.register(lowerCamelCase_, lowerCamelCase_ )
| 355
|
"""simple docstring"""
import cv2 as cva
import numpy as np
class a_ :
'''simple docstring'''
def __init__(self, k, window_size ):
'''simple docstring'''
if k in (0.04, 0.06):
self.k = k
self.window_size = window_size
else:
raise ValueError('invalid k value' )
def __str__(self ):
'''simple docstring'''
return str(self.k )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
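# Harris response per pixel: r = det(M) - k * trace(M)^2, where M is the 2x2 structure
# tensor (windowed sums of Ix^2, Iy^2 and Ix*Iy); strongly positive r marks a corner.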
img = cva.imread(lowerCamelCase_, 0 )
h , w = img.shape
corner_list : list[list[int]] = []
color_img = img.copy()
color_img = cva.cvtColor(color_img, cva.COLOR_GRAY2RGB )
dy , dx = np.gradient(img )
ixx = dx**2
iyy = dy**2
ixy = dx * dy
k = 0.04
offset = self.window_size // 2
for y in range(offset, h - offset ):
for x in range(offset, w - offset ):
wxx = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
wyy = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
wxy = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
det = (wxx * wyy) - (wxy**2)
trace = wxx + wyy
r = det - k * (trace**2)
# Threshold on the corner response; 0.5 is arbitrary and can be tuned per image
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0), 0 )
color_img.itemset((y, x, 1), 0 )
color_img.itemset((y, x, 2), 2_5_5 )
return color_img, corner_list
if __name__ == "__main__":
edge_detect = a_(0.04, 3)
color_img , corner_list = edge_detect.a__("path_to_image")
cva.imwrite("detect.png", color_img)
| 316
| 0
|
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
A_ : str = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : bool = field(default=snake_case_ , metadata={'help': 'Whether to use SortishSampler or not.'} )
lowerCamelCase__ : bool = field(
default=snake_case_ , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
lowerCamelCase__ : Optional[int] = field(
default=snake_case_ , metadata={
'help': (
'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `max_length` value of the model configuration.'
)
} , )
lowerCamelCase__ : Optional[int] = field(
default=snake_case_ , metadata={
'help': (
'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `num_beams` value of the model configuration.'
)
} , )
lowerCamelCase__ : Optional[Union[str, Path, GenerationConfig]] = field(
default=snake_case_ , metadata={
'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
} , )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = super().to_dict()
for k, v in d.items():
if isinstance(lowerCamelCase_, lowerCamelCase_ ):
lowerCamelCase__ : Any = v.to_dict()
return d
| 356
|
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
A_ : str = TypeVar("KEY")
A_ : List[Any] = TypeVar("VAL")
@dataclass(frozen=snake_case_ , slots=snake_case_ )
class a_ ( Generic[KEY, VAL] ):
'''simple docstring'''
lowerCamelCase__ : KEY
lowerCamelCase__ : VAL
class a_ ( _Item ):
'''simple docstring'''
def __init__(self ):
'''simple docstring'''
super().__init__(lowerCamelCase_, lowerCamelCase_ )
def __bool__(self ):
'''simple docstring'''
return False
A_ : List[Any] = _DeletedItem()
class a_ ( MutableMapping[KEY, VAL] ):
'''simple docstring'''
def __init__(self, lowerCamelCase_ = 8, lowerCamelCase_ = 0.75 ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = initial_block_size
lowerCamelCase__ : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
lowerCamelCase__ : List[Any] = capacity_factor
lowerCamelCase__ : Optional[int] = 0
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return hash(lowerCamelCase_ ) % len(self._buckets )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return (ind + 1) % len(self._buckets )
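# Collision handling is open addressing with linear probing: step one bucket at a time,
# wrapping around, until a free slot, a deletion tombstone, or the matching key turns up.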
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self._buckets[ind]
if not stored:
lowerCamelCase__ : Tuple = _Item(lowerCamelCase_, lowerCamelCase_ )
self._len += 1
return True
elif stored.key == key:
lowerCamelCase__ : Optional[int] = _Item(lowerCamelCase_, lowerCamelCase_ )
return True
else:
return False
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
if len(self._buckets ) <= self._initial_block_size:
return False
lowerCamelCase__ : Any = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = self._buckets
lowerCamelCase__ : Dict = [None] * new_size
lowerCamelCase__ : Tuple = 0
for item in old_buckets:
if item:
self._add_item(item.key, item.val )
def a__ (self ):
'''simple docstring'''
self._resize(len(self._buckets ) * 2 )
def a__ (self ):
'''simple docstring'''
self._resize(len(self._buckets ) // 2 )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self._get_bucket_index(lowerCamelCase_ )
for _ in range(len(self._buckets ) ):
yield ind
lowerCamelCase__ : Tuple = self._get_next_ind(lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
for ind in self._iterate_buckets(lowerCamelCase_ ):
if self._try_set(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
break
def __setitem__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
if self._is_full():
self._size_up()
self._add_item(lowerCamelCase_, lowerCamelCase_ )
def __delitem__(self, lowerCamelCase_ ):
'''simple docstring'''
for ind in self._iterate_buckets(lowerCamelCase_ ):
lowerCamelCase__ : List[str] = self._buckets[ind]
if item is None:
raise KeyError(lowerCamelCase_ )
if item is _deleted:
continue
if item.key == key:
lowerCamelCase__ : Optional[int] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__(self, lowerCamelCase_ ):
'''simple docstring'''
for ind in self._iterate_buckets(lowerCamelCase_ ):
lowerCamelCase__ : List[Any] = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(lowerCamelCase_ )
def __len__(self ):
'''simple docstring'''
return self._len
def __iter__(self ):
'''simple docstring'''
yield from (item.key for item in self._buckets if item)
def __repr__(self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = ' ,'.join(
f'''{item.key}: {item.val}''' for item in self._buckets if item )
return f'''HashMap({val_string})'''
| 316
| 0
|
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
# Load checkpoint
lowerCamelCase__ : Union[str, Any] = torch.load(_lowerCamelCase , map_location='cpu' )
lowerCamelCase__ : List[str] = chkpt['model']
# We have the base model one level deeper than the original XLM repository
lowerCamelCase__ : Optional[Any] = {}
for k, v in state_dict.items():
if "pred_layer" in k:
lowerCamelCase__ : Any = v
else:
lowerCamelCase__ : Optional[int] = v
lowerCamelCase__ : Dict = chkpt['params']
lowerCamelCase__ : List[Any] = {n: v for n, v in config.items() if not isinstance(_lowerCamelCase , (torch.FloatTensor, numpy.ndarray) )}
lowerCamelCase__ : str = chkpt['dico_word2id']
lowerCamelCase__ : Optional[int] = {s + '</w>' if s.find('@@' ) == -1 and i > 13 else s.replace('@@' , '' ): i for s, i in vocab.items()}
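# BPE convention: tokens without a trailing "@@" end a word, so "</w>" is appended;
# "@@" marks a word-internal piece and is stripped. The first 14 entries (i <= 13) are
# special tokens and are kept verbatim.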
# Save pytorch-model
lowerCamelCase__ : List[Any] = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
lowerCamelCase__ : Optional[Any] = pytorch_dump_folder_path + '/' + CONFIG_NAME
lowerCamelCase__ : Any = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['vocab_file']
print(f'''Save PyTorch model to {pytorch_weights_dump_path}''' )
torch.save(_lowerCamelCase , _lowerCamelCase )
print(f'''Save configuration file to {pytorch_config_dump_path}''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowerCamelCase , indent=2 ) + '\n' )
print(f'''Save vocab file to {pytorch_vocab_dump_path}''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowerCamelCase , indent=2 ) + '\n' )
if __name__ == "__main__":
A_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
A_ : int = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 357
|
"""simple docstring"""
def lowerCamelCase_ ( ):
constant = []
i = 1
while len(constant ) < 1e6:
constant.append(str(i ) )
i += 1
constant = ''.join(constant )
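# Champernowne's constant 0.123456789101112...: the answer multiplies the digits at
# (1-indexed) positions 1, 10, 100, ..., 1_000_000, i.e. indices 0, 9, 99, ... below.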
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[9_9999] )
* int(constant[99_9999] )
)
if __name__ == "__main__":
print(solution())
| 316
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Optional[Any] = logging.get_logger(__name__)
A_ : Any = {
"google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = 'pegasus'
lowerCamelCase__ : Optional[Any] = ['past_key_values']
lowerCamelCase__ : List[str] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__(self, lowerCamelCase_=5_0_2_6_5, lowerCamelCase_=1_0_2_4, lowerCamelCase_=1_2, lowerCamelCase_=4_0_9_6, lowerCamelCase_=1_6, lowerCamelCase_=1_2, lowerCamelCase_=4_0_9_6, lowerCamelCase_=1_6, lowerCamelCase_=0.0, lowerCamelCase_=0.0, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_="gelu", lowerCamelCase_=1_0_2_4, lowerCamelCase_=0.1, lowerCamelCase_=0.0, lowerCamelCase_=0.0, lowerCamelCase_=0.02, lowerCamelCase_=0, lowerCamelCase_=False, lowerCamelCase_=0, lowerCamelCase_=1, lowerCamelCase_=1, **lowerCamelCase_, ):
'''simple docstring'''
lowerCamelCase__ : Any = vocab_size
lowerCamelCase__ : str = max_position_embeddings
lowerCamelCase__ : Optional[int] = d_model
lowerCamelCase__ : Union[str, Any] = encoder_ffn_dim
lowerCamelCase__ : List[Any] = encoder_layers
lowerCamelCase__ : Union[str, Any] = encoder_attention_heads
lowerCamelCase__ : Union[str, Any] = decoder_ffn_dim
lowerCamelCase__ : str = decoder_layers
lowerCamelCase__ : List[str] = decoder_attention_heads
lowerCamelCase__ : Dict = dropout
lowerCamelCase__ : Any = attention_dropout
lowerCamelCase__ : Tuple = activation_dropout
lowerCamelCase__ : Any = activation_function
lowerCamelCase__ : int = init_std
lowerCamelCase__ : List[str] = encoder_layerdrop
lowerCamelCase__ : Union[str, Any] = decoder_layerdrop
lowerCamelCase__ : Tuple = use_cache
lowerCamelCase__ : Any = encoder_layers
lowerCamelCase__ : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowerCamelCase_, eos_token_id=lowerCamelCase_, is_encoder_decoder=lowerCamelCase_, decoder_start_token_id=lowerCamelCase_, forced_eos_token_id=lowerCamelCase_, **lowerCamelCase_, )
@property
def a__ (self ):
'''simple docstring'''
return self.encoder_attention_heads
@property
def a__ (self ):
'''simple docstring'''
return self.d_model
| 358
|
"""simple docstring"""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
A_ : Union[str, Any] = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
A_ : int = concatenate_datasets
A_ : Any = DownloadConfig
A_ : List[Any] = DownloadManager
A_ : Optional[Any] = DownloadMode
A_ : List[str] = DownloadConfig
A_ : Optional[int] = DownloadMode
A_ : Dict = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 316
| 0
|
"""simple docstring"""
from math import ceil, sqrt
def lowerCamelCase_ ( _lowerCamelCase = 100_0000 ):
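# A square lamina with outer width w and hole width h uses w*w - h*h tiles, and h must
# share w's parity; for each w, count the hole widths that keep the tile count in limit.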
answer = 0
for outer_width in range(3 , (_lowerCamelCase // 4) + 2 ):
if outer_width**2 > _lowerCamelCase:
hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - _lowerCamelCase ) ) , 1 )
else:
hole_width_lower_bound = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
if __name__ == "__main__":
print(f"{solution() = }")
| 359
|
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
A_ : str = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : bool = field(default=snake_case_ , metadata={'help': 'Whether to use SortishSampler or not.'} )
lowerCamelCase__ : bool = field(
default=snake_case_ , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
lowerCamelCase__ : Optional[int] = field(
default=snake_case_ , metadata={
'help': (
'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `max_length` value of the model configuration.'
)
} , )
lowerCamelCase__ : Optional[int] = field(
default=snake_case_ , metadata={
'help': (
'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `num_beams` value of the model configuration.'
)
} , )
lowerCamelCase__ : Optional[Union[str, Path, GenerationConfig]] = field(
default=snake_case_ , metadata={
'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
} , )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = super().to_dict()
for k, v in d.items():
if isinstance(lowerCamelCase_, lowerCamelCase_ ):
lowerCamelCase__ : Any = v.to_dict()
return d
| 316
| 0
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
A_ : str = logging.get_logger(__name__)
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase=False ):
lowerCamelCase__ : List[str] = []
# fmt: off
# stem:
rename_keys.append(('cls_token', 'vit.embeddings.cls_token') )
rename_keys.append(('pos_embed', 'vit.embeddings.position_embeddings') )
rename_keys.append(('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias') )
# backbone
rename_keys.append(('patch_embed.backbone.stem.conv.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight') )
rename_keys.append(('patch_embed.backbone.stem.norm.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight') )
rename_keys.append(('patch_embed.backbone.stem.norm.bias', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCamelCase__ : Optional[int] = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
# fmt: on
return rename_keys
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ):
for i in range(config.num_hidden_layers ):
if base_model:
lowerCamelCase__ : str = ''
else:
lowerCamelCase__ : Union[str, Any] = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase__ : Union[str, Any] = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
lowerCamelCase__ : List[Any] = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase__ : Optional[int] = in_proj_weight[
: config.hidden_size, :
]
lowerCamelCase__ : str = in_proj_bias[: config.hidden_size]
lowerCamelCase__ : int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase__ : Optional[int] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase__ : Tuple = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase__ : List[str] = in_proj_bias[-config.hidden_size :]
def lowerCamelCase_ ( _lowerCamelCase ):
ignore_keys = ['head.weight', 'head.bias']
for k in ignore_keys:
_lowerCamelCase.pop(k , None )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Optional[int] = dct.pop(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = val
def lowerCamelCase_ ( ):
url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
im = Image.open(requests.get(url , stream=True ).raw )
return im
@torch.no_grad()
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ):
lowerCamelCase__ : Union[str, Any] = BitConfig(
global_padding='same' , layer_type='bottleneck' , depths=(3, 4, 9) , out_features=['stage3'] , embedding_dynamic_padding=_lowerCamelCase , )
lowerCamelCase__ : str = ViTHybridConfig(backbone_config=_lowerCamelCase , image_size=384 , num_labels=1000 )
lowerCamelCase__ : str = False
# load original model from timm
lowerCamelCase__ : str = timm.create_model(_lowerCamelCase , pretrained=_lowerCamelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
lowerCamelCase__ : Dict = timm_model.state_dict()
if base_model:
remove_classification_head_(_lowerCamelCase )
lowerCamelCase__ : Optional[int] = create_rename_keys(_lowerCamelCase , _lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
read_in_q_k_v(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : Tuple = 'huggingface/label-files'
lowerCamelCase__ : Any = 'imagenet-1k-id2label.json'
lowerCamelCase__ : Optional[int] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type='dataset' ) , 'r' ) )
lowerCamelCase__ : Optional[Any] = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
lowerCamelCase__ : Optional[Any] = idalabel
lowerCamelCase__ : Any = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
lowerCamelCase__ : List[Any] = ViTHybridModel(_lowerCamelCase ).eval()
else:
lowerCamelCase__ : Dict = ViTHybridForImageClassification(_lowerCamelCase ).eval()
model.load_state_dict(_lowerCamelCase )
# create image processor
lowerCamelCase__ : Dict = create_transform(**resolve_data_config({} , model=_lowerCamelCase ) )
lowerCamelCase__ : Optional[int] = transform.transforms
lowerCamelCase__ : Optional[Any] = {
'bilinear': PILImageResampling.BILINEAR,
'bicubic': PILImageResampling.BICUBIC,
'nearest': PILImageResampling.NEAREST,
}
lowerCamelCase__ : List[Any] = ViTHybridImageProcessor(
do_resize=_lowerCamelCase , size={'shortest_edge': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=_lowerCamelCase , crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]} , do_normalize=_lowerCamelCase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
lowerCamelCase__ : List[Any] = prepare_img()
lowerCamelCase__ : Tuple = transform(_lowerCamelCase ).unsqueeze(0 )
lowerCamelCase__ : List[str] = processor(_lowerCamelCase , return_tensors='pt' ).pixel_values
# verify pixel values
assert torch.allclose(_lowerCamelCase , _lowerCamelCase )
# verify logits
with torch.no_grad():
lowerCamelCase__ : Tuple = model(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = outputs.logits
print('Predicted class:' , logits.argmax(-1 ).item() )
if base_model:
lowerCamelCase__ : Optional[int] = timm_model.forward_features(_lowerCamelCase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(_lowerCamelCase , outputs.pooler_output , atol=1e-3 )
else:
lowerCamelCase__ : int = timm_model(_lowerCamelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_lowerCamelCase , outputs.logits , atol=1e-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(f'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCamelCase )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(_lowerCamelCase )
if push_to_hub:
print(f'''Pushing model and processor to the hub {vit_name}''' )
model.push_to_hub(f'''ybelkada/{vit_name}''' )
processor.push_to_hub(f'''ybelkada/{vit_name}''' )
if __name__ == "__main__":
A_ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_r50_s16_384",
type=str,
help="Name of the hybrid ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
A_ : Optional[Any] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 360
|
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = []
lowerCamelCase__ : List[str] = []
lowerCamelCase__ : Tuple = {
'^': 3,
'*': 2,
'/': 2,
'%': 2,
'+': 1,
'-': 1,
} # Priority of each operator
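# Shunting-yard conversion: operands go straight to the output, "(" is stacked, ")"
# unwinds the stack to the matching "(", and an operator first pops anything of equal
# or higher priority before being pushed itself.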
lowerCamelCase__ : List[str] = len(_lowerCamelCase ) if (len(_lowerCamelCase ) > 7) else 7
# Print table header for output
print(
'Symbol'.center(8 ) , 'Stack'.center(_lowerCamelCase ) , 'Postfix'.center(_lowerCamelCase ) , sep=' | ' , )
print('-' * (print_width * 3 + 7) )
for x in infix:
if x.isalpha() or x.isdigit():
post_fix.append(_lowerCamelCase ) # if x is Alphabet / Digit, add it to Postfix
elif x == "(":
stack.append(_lowerCamelCase ) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
stack.pop()
else:
if len(_lowerCamelCase ) == 0:
stack.append(_lowerCamelCase ) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
while len(_lowerCamelCase ) > 0 and priority[x] <= priority[stack[-1]]:
post_fix.append(stack.pop() ) # pop stack & add to Postfix
stack.append(_lowerCamelCase ) # push x to stack
print(
x.center(8 ) , (''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , (''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , sep=' | ' , ) # Output in tabular format
while len(_lowerCamelCase ) > 0: # while stack is not empty
post_fix.append(stack.pop() ) # pop stack & add to Postfix
print(
' '.center(8 ) , (''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , (''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , sep=' | ' , ) # Output in tabular format
return "".join(_lowerCamelCase ) # return Postfix as str
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = list(infix[::-1] ) # reverse the infix equation
for i in range(len(_lowerCamelCase ) ):
if infix[i] == "(":
lowerCamelCase__ : List[Any] = ')' # change "(" to ")"
elif infix[i] == ")":
lowerCamelCase__ : Tuple = '(' # change ")" to "("
return (infix_2_postfix(''.join(_lowerCamelCase ) ))[
::-1
] # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
A_ : Tuple = input("\nEnter an Infix Equation = ") # Input an Infix equation
A_ : List[str] = "".join(Infix.split()) # Remove spaces from the input
print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
| 316
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : List[Any] = logging.get_logger(__name__)
A_ : Dict = {
"uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = 'mra'
def __init__(self, lowerCamelCase_=5_0_2_6_5, lowerCamelCase_=7_6_8, lowerCamelCase_=1_2, lowerCamelCase_=1_2, lowerCamelCase_=3_0_7_2, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=5_1_2, lowerCamelCase_=1, lowerCamelCase_=0.02, lowerCamelCase_=1e-5, lowerCamelCase_="absolute", lowerCamelCase_=4, lowerCamelCase_="full", lowerCamelCase_=0, lowerCamelCase_=0, lowerCamelCase_=1, lowerCamelCase_=0, lowerCamelCase_=2, **lowerCamelCase_, ):
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase_, bos_token_id=lowerCamelCase_, eos_token_id=lowerCamelCase_, **lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = vocab_size
lowerCamelCase__ : Tuple = max_position_embeddings
lowerCamelCase__ : List[str] = hidden_size
lowerCamelCase__ : Tuple = num_hidden_layers
lowerCamelCase__ : List[Any] = num_attention_heads
lowerCamelCase__ : int = intermediate_size
lowerCamelCase__ : List[Any] = hidden_act
lowerCamelCase__ : Union[str, Any] = hidden_dropout_prob
lowerCamelCase__ : Optional[Any] = attention_probs_dropout_prob
lowerCamelCase__ : Any = initializer_range
lowerCamelCase__ : int = type_vocab_size
lowerCamelCase__ : Any = layer_norm_eps
lowerCamelCase__ : Optional[Any] = position_embedding_type
lowerCamelCase__ : Any = block_per_row
lowerCamelCase__ : List[Any] = approx_mode
lowerCamelCase__ : Union[str, Any] = initial_prior_first_n_blocks
lowerCamelCase__ : str = initial_prior_diagonal_n_blocks
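# Usage sketch (assumption: the class above corresponds to the public
# `MraConfig` of `uw-madison/mra-base-512-4`):
# config = MraConfig(vocab_size=50265, hidden_size=768, block_per_row=4)
# config.save_pretrained("./mra-base-512-4")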
| 361
|
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
A_ : Any = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 1_28,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class a_ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def a__ (cls ):
'''simple docstring'''
lowerCamelCase__ : Tuple = TOKEN
HfFolder.save_token(lowerCamelCase_ )
@classmethod
def a__ (cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token, repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='test-dynamic-config' )
except HTTPError:
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('test-config', use_auth_token=self._token )
lowerCamelCase__ : List[str] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase_, repo_id='test-config', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : List[Any] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('valid_org/test-config-org', use_auth_token=self._token )
lowerCamelCase__ : int = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCamelCase_, repo_id='valid_org/test-config-org', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : Tuple = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
CustomConfig.register_for_auto_class()
lowerCamelCase__ : str = CustomConfig(attribute=4_2 )
config.push_to_hub('test-dynamic-config', use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map, {'AutoConfig': 'custom_configuration.CustomConfig'} )
lowerCamelCase__ : List[str] = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''', trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__, 'CustomConfig' )
self.assertEqual(new_config.attribute, 4_2 )
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
lowerCamelCase__ : Union[str, Any] = c.n_embd + 1 # int
lowerCamelCase__ : Optional[Any] = c.resid_pdrop + 1.0 # float
lowerCamelCase__ : str = not c.scale_attn_weights # bool
lowerCamelCase__ : Any = c.summary_type + 'foo' # str
c.update_from_string(
f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(lowerCamelCase_, c.n_embd, 'mismatch for key: n_embd' )
self.assertEqual(lowerCamelCase_, c.resid_pdrop, 'mismatch for key: resid_pdrop' )
self.assertEqual(lowerCamelCase_, c.scale_attn_weights, 'mismatch for key: scale_attn_weights' )
self.assertEqual(lowerCamelCase_, c.summary_type, 'mismatch for key: summary_type' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = PretrainedConfig()
lowerCamelCase__ : Union[str, Any] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to add in `config_common_kwargs` above.
self.assertListEqual(
lowerCamelCase_, ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
lowerCamelCase__ : str = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCamelCase_, lowerCamelCase_ )]
if len(lowerCamelCase_ ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
f''' {', '.join(lowerCamelCase_ )}.''' )
def a__ (self ):
'''simple docstring'''
with self.assertRaises(lowerCamelCase_ ):
# config is in subfolder, the following should not work without specifying the subfolder
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
lowerCamelCase__ : Any = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder', subfolder='bert' )
self.assertIsNotNone(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = mock.Mock()
lowerCamelCase__ : str = 5_0_0
lowerCamelCase__ : Union[str, Any] = {}
lowerCamelCase__ : Any = HTTPError
lowerCamelCase__ : str = {}
# Download this model to make sure it's in the cache.
lowerCamelCase__ : Dict = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request', return_value=lowerCamelCase_ ) as mock_head:
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This checks that we did call the fake head request
mock_head.assert_called()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = AutoConfig.from_pretrained('bert-base-cased' )
lowerCamelCase__ : Optional[Any] = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Tuple = 2
json.dump(configuration.to_dict(), open(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
lowerCamelCase__ : List[str] = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
lowerCamelCase__ : Optional[Any] = ['config.42.0.0.json']
lowerCamelCase__ : List[Any] = 7_6_8
configuration.save_pretrained(lowerCamelCase_ )
shutil.move(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), os.path.join(lowerCamelCase_, 'config.42.0.0.json' ) )
lowerCamelCase__ : str = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 7_6_8 )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
lowerCamelCase__ : Dict = 'v4.0.0'
lowerCamelCase__ , lowerCamelCase__ : str = new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCamelCase_, return_unused_kwargs=lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
# This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(lowerCamelCase_, {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
lowerCamelCase__ : Optional[Any] = 'v3.0.0'
lowerCamelCase__ : Optional[int] = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(old_configuration.hidden_size, 7_6_8 )
| 316
| 0
|
"""simple docstring"""
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = False, lowerCamelCase_ = False, lowerCamelCase_ = None, lowerCamelCase_ = None, **lowerCamelCase_, ):
'''simple docstring'''
super().__init__(
lowerCamelCase_, split=lowerCamelCase_, features=lowerCamelCase_, cache_dir=lowerCamelCase_, keep_in_memory=lowerCamelCase_, streaming=lowerCamelCase_, num_proc=lowerCamelCase_, **lowerCamelCase_, )
lowerCamelCase__ : Any = field
lowerCamelCase__ : str = path_or_paths if isinstance(lowerCamelCase_, lowerCamelCase_ ) else {self.split: path_or_paths}
lowerCamelCase__ : Dict = Json(
cache_dir=lowerCamelCase_, data_files=lowerCamelCase_, features=lowerCamelCase_, field=lowerCamelCase_, **lowerCamelCase_, )
def a__ (self ):
'''simple docstring'''
if self.streaming:
lowerCamelCase__ : List[str] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
lowerCamelCase__ : Optional[Any] = None
lowerCamelCase__ : str = None
lowerCamelCase__ : Tuple = None
lowerCamelCase__ : Optional[int] = None
self.builder.download_and_prepare(
download_config=lowerCamelCase_, download_mode=lowerCamelCase_, verification_mode=lowerCamelCase_, base_path=lowerCamelCase_, num_proc=self.num_proc, )
lowerCamelCase__ : List[Any] = self.builder.as_dataset(
split=self.split, verification_mode=lowerCamelCase_, in_memory=self.keep_in_memory )
return dataset
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ = None, lowerCamelCase_ = None, **lowerCamelCase_, ):
'''simple docstring'''
if num_proc is not None and num_proc <= 0:
raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' )
lowerCamelCase__ : List[Any] = dataset
lowerCamelCase__ : List[Any] = path_or_buf
lowerCamelCase__ : Union[str, Any] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
lowerCamelCase__ : List[Any] = num_proc
lowerCamelCase__ : List[str] = 'utf-8'
lowerCamelCase__ : Optional[int] = to_json_kwargs
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = self.to_json_kwargs.pop('path_or_buf', lowerCamelCase_ )
lowerCamelCase__ : Tuple = self.to_json_kwargs.pop('orient', 'records' )
lowerCamelCase__ : Union[str, Any] = self.to_json_kwargs.pop('lines', True if orient == 'records' else False )
lowerCamelCase__ : Union[str, Any] = self.to_json_kwargs.pop('index', False if orient in ['split', 'table'] else True )
lowerCamelCase__ : str = self.to_json_kwargs.pop('compression', lowerCamelCase_ )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(f'''`datasets` currently does not support {compression} compression''' )
if isinstance(self.path_or_buf, (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf, 'wb', compression=lowerCamelCase_ ) as buffer:
lowerCamelCase__ : Tuple = self._write(file_obj=lowerCamelCase_, orient=lowerCamelCase_, lines=lowerCamelCase_, index=lowerCamelCase_, **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
f'''The compression parameter is not supported when writing to a buffer, but compression={compression}'''
' was passed. Please provide a local path instead.' )
lowerCamelCase__ : List[Any] = self._write(
file_obj=self.path_or_buf, orient=lowerCamelCase_, lines=lowerCamelCase_, index=lowerCamelCase_, **self.to_json_kwargs )
return written
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = args
lowerCamelCase__ : Union[str, Any] = query_table(
table=self.dataset.data, key=slice(lowerCamelCase_, offset + self.batch_size ), indices=self.dataset._indices, )
lowerCamelCase__ : Optional[int] = batch.to_pandas().to_json(
path_or_buf=lowerCamelCase_, orient=lowerCamelCase_, lines=lowerCamelCase_, index=lowerCamelCase_, **lowerCamelCase_ )
if not json_str.endswith('\n' ):
json_str += "\n"
return json_str.encode(self.encoding )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_, ):
'''simple docstring'''
lowerCamelCase__ : Any = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0, len(self.dataset ), self.batch_size ), unit='ba', disable=not logging.is_progress_bar_enabled(), desc='Creating json from Arrow format', ):
lowerCamelCase__ : str = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(lowerCamelCase_ )
else:
lowerCamelCase__ , lowerCamelCase__ : Any = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json, [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, lowerCamelCase_, lowerCamelCase_ )], ), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit='ba', disable=not logging.is_progress_bar_enabled(), desc='Creating json from Arrow format', ):
written += file_obj.write(lowerCamelCase_ )
return written
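# Usage sketch (assumption: the reader/writer classes above back the public
# `datasets` JSON API):
# from datasets import Dataset
# ds = Dataset.from_dict({"a": [1, 2, 3]})
# ds.to_json("data.jsonl", lines=True, num_proc=2)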
| 362
|
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : list[list[int]] = []
lowerCamelCase__ : list[int] = []
lowerCamelCase__ : List[str] = 0
lowerCamelCase__ : List[Any] = sum(_lowerCamelCase )
create_state_space_tree(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
return result
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
if sum(_lowerCamelCase ) > max_sum or (remaining_nums_sum + sum(_lowerCamelCase )) < max_sum:
return
if sum(_lowerCamelCase ) == max_sum:
result.append(_lowerCamelCase )
return
for index in range(_lowerCamelCase , len(_lowerCamelCase ) ):
create_state_space_tree(
_lowerCamelCase , _lowerCamelCase , index + 1 , [*path, nums[index]] , _lowerCamelCase , remaining_nums_sum - nums[index] , )
A_ : Optional[Any] = [3, 34, 4, 12, 5, 2]
A_ : List[str] = 9
A_ : List[Any] = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
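# Expected output for the demo values above (subsets of [3, 34, 4, 12, 5, 2]
# summing to 9): [3, 4, 2] [4, 5]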
| 316
| 0
|
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def lowerCamelCase_ ( _lowerCamelCase ): # picklable for multiprocessing
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def lowerCamelCase_ ( ):
with parallel_backend('spark' ):
assert ParallelBackendConfig.backend_name == "spark"
lowerCamelCase__ : Dict = [1, 2, 3]
with pytest.raises(_lowerCamelCase ):
with parallel_backend('unsupported backend' ):
map_nested(_lowerCamelCase , _lowerCamelCase , num_proc=2 )
with pytest.raises(_lowerCamelCase ):
with parallel_backend('unsupported backend' ):
map_nested(_lowerCamelCase , _lowerCamelCase , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('num_proc' , [2, -1] )
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : str = [1, 2]
lowerCamelCase__ : Tuple = {'a': 1, 'b': 2}
lowerCamelCase__ : List[Any] = {'a': [1, 2], 'b': [3, 4]}
lowerCamelCase__ : Optional[int] = {'a': {'1': 1}, 'b': 2}
lowerCamelCase__ : List[Any] = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
lowerCamelCase__ : List[str] = [2, 3]
lowerCamelCase__ : List[Any] = {'a': 2, 'b': 3}
lowerCamelCase__ : Optional[Any] = {'a': [2, 3], 'b': [4, 5]}
lowerCamelCase__ : Any = {'a': {'1': 2}, 'b': 3}
lowerCamelCase__ : List[Any] = {'a': 2, 'b': 3, 'c': 4, 'd': 5}
with parallel_backend('spark' ):
assert map_nested(_lowerCamelCase , _lowerCamelCase , num_proc=_lowerCamelCase ) == expected_map_nested_sa
assert map_nested(_lowerCamelCase , _lowerCamelCase , num_proc=_lowerCamelCase ) == expected_map_nested_sa
assert map_nested(_lowerCamelCase , _lowerCamelCase , num_proc=_lowerCamelCase ) == expected_map_nested_sa
assert map_nested(_lowerCamelCase , _lowerCamelCase , num_proc=_lowerCamelCase ) == expected_map_nested_sa
assert map_nested(_lowerCamelCase , _lowerCamelCase , num_proc=_lowerCamelCase ) == expected_map_nested_sa
| 363
|
"""simple docstring"""
from __future__ import annotations
import queue
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = data
lowerCamelCase__ : Optional[int] = None
lowerCamelCase__ : List[Any] = None
def lowerCamelCase_ ( ):
print('\n********Press N to stop entering at any point of time********\n' )
lowerCamelCase__ : str = input('Enter the value of the root node: ' ).strip().lower()
lowerCamelCase__ : queue.Queue = queue.Queue()
lowerCamelCase__ : Optional[Any] = TreeNode(int(_lowerCamelCase ) )
q.put(_lowerCamelCase )
while not q.empty():
lowerCamelCase__ : List[Any] = q.get()
lowerCamelCase__ : str = f'''Enter the left node of {node_found.data}: '''
lowerCamelCase__ : Dict = input(_lowerCamelCase ).strip().lower() or 'n'
if check == "n":
return tree_node
lowerCamelCase__ : str = TreeNode(int(_lowerCamelCase ) )
lowerCamelCase__ : Dict = left_node
q.put(_lowerCamelCase )
lowerCamelCase__ : List[str] = f'''Enter the right node of {node_found.data}: '''
lowerCamelCase__ : List[str] = input(_lowerCamelCase ).strip().lower() or 'n'
if check == "n":
return tree_node
lowerCamelCase__ : Optional[int] = TreeNode(int(_lowerCamelCase ) )
lowerCamelCase__ : Any = right_node
q.put(_lowerCamelCase )
raise # unreachable in practice; kept so type checkers see every path return a TreeNode
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
print(node.data , end=',' )
pre_order(node.left )
pre_order(node.right )
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
in_order(node.left )
print(node.data , end=',' )
in_order(node.right )
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=',' )
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
lowerCamelCase__ : queue.Queue = queue.Queue()
q.put(_lowerCamelCase )
while not q.empty():
lowerCamelCase__ : Any = q.get()
print(node_dequeued.data , end=',' )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
lowerCamelCase__ : queue.Queue = queue.Queue()
q.put(_lowerCamelCase )
while not q.empty():
lowerCamelCase__ : List[Any] = []
while not q.empty():
lowerCamelCase__ : str = q.get()
print(node_dequeued.data , end=',' )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(_lowerCamelCase )
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
lowerCamelCase__ : list[TreeNode] = []
lowerCamelCase__ : int = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=',' )
stack.append(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = n.left
# end of while means current node doesn't have left child
lowerCamelCase__ : List[Any] = stack.pop()
# start to traverse its right child
lowerCamelCase__ : Optional[Any] = n.right
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
lowerCamelCase__ : list[TreeNode] = []
lowerCamelCase__ : int = node
while n or stack:
while n:
stack.append(_lowerCamelCase )
lowerCamelCase__ : List[str] = n.left
lowerCamelCase__ : Tuple = stack.pop()
print(n.data , end=',' )
lowerCamelCase__ : Union[str, Any] = n.right
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
lowerCamelCase__ , lowerCamelCase__ : Any = [], [] # stacka feeds the traversal, stackb collects the reversed post order
lowerCamelCase__ : int = node
stacka.append(_lowerCamelCase )
while stacka: # to find the reversed order of post order, store it in stackb
lowerCamelCase__ : List[str] = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stackb.append(_lowerCamelCase )
while stackb: # pop up from stackb will be the post order
print(stackb.pop().data , end=',' )
def lowerCamelCase_ ( _lowerCamelCase = "" , _lowerCamelCase=50 , _lowerCamelCase="*" ):
if not s:
return "\n" + width * char
lowerCamelCase__ , lowerCamelCase__ : Dict = divmod(width - len(_lowerCamelCase ) - 2 , 2 )
return f'''{left * char} {s} {(left + extra) * char}'''
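# Illustrative call (derived from the helper above): with the default width of
# 50, prompt("Binary Tree Traversals") returns thirteen '*' on each side:
# "************* Binary Tree Traversals *************"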
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("Binary Tree Traversals"))
A_ : TreeNode = build_tree()
print(prompt("Pre Order Traversal"))
pre_order(node)
print(prompt() + "\n")
print(prompt("In Order Traversal"))
in_order(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal"))
post_order(node)
print(prompt() + "\n")
print(prompt("Level Order Traversal"))
level_order(node)
print(prompt() + "\n")
print(prompt("Actual Level Order Traversal"))
level_order_actual(node)
print("*" * 50 + "\n")
print(prompt("Pre Order Traversal - Iteration Version"))
pre_order_iter(node)
print(prompt() + "\n")
print(prompt("In Order Traversal - Iteration Version"))
in_order_iter(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal - Iteration Version"))
post_order_iter(node)
print(prompt())
| 316
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A_ : str = logging.get_logger(__name__)
A_ : Optional[int] = "▁"
A_ : Dict = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}
A_ : List[str] = {
"vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
},
"monolingual_vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
},
}
A_ : Tuple = {"vinai/bartpho-syllable": 10_24}
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = VOCAB_FILES_NAMES
lowerCamelCase__ : Dict = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ : Any = ['input_ids', 'attention_mask']
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_="<s>", lowerCamelCase_="</s>", lowerCamelCase_="</s>", lowerCamelCase_="<s>", lowerCamelCase_="<unk>", lowerCamelCase_="<pad>", lowerCamelCase_="<mask>", lowerCamelCase_ = None, **lowerCamelCase_, ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = AddedToken(lowerCamelCase_, lstrip=lowerCamelCase_, rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_, lowerCamelCase_ ) else mask_token
lowerCamelCase__ : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCamelCase_, eos_token=lowerCamelCase_, unk_token=lowerCamelCase_, sep_token=lowerCamelCase_, cls_token=lowerCamelCase_, pad_token=lowerCamelCase_, mask_token=lowerCamelCase_, sp_model_kwargs=self.sp_model_kwargs, **lowerCamelCase_, )
lowerCamelCase__ : int = vocab_file
lowerCamelCase__ : List[Any] = monolingual_vocab_file
lowerCamelCase__ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCamelCase_ ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
lowerCamelCase__ : Optional[int] = {}
lowerCamelCase__ : List[Any] = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(lowerCamelCase_ ) not in self.fairseq_tokens_to_ids:
lowerCamelCase__ : List[Any] = cnt
cnt += 1
with open(lowerCamelCase_, 'r', encoding='utf-8' ) as f:
for line in f.readlines():
lowerCamelCase__ : Tuple = line.strip().split()[0]
lowerCamelCase__ : Any = len(self.fairseq_tokens_to_ids )
if str(lowerCamelCase_ ) not in self.fairseq_tokens_to_ids:
lowerCamelCase__ : Optional[Any] = len(self.fairseq_tokens_to_ids )
lowerCamelCase__ : List[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__(self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.__dict__.copy()
lowerCamelCase__ : Union[str, Any] = None
lowerCamelCase__ : Optional[Any] = self.sp_model.serialized_model_proto()
return state
def __setstate__(self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = d
# for backward compatibility
if not hasattr(self, 'sp_model_kwargs' ):
lowerCamelCase__ : Optional[int] = {}
lowerCamelCase__ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def a__ (self, lowerCamelCase_, lowerCamelCase_ = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase__ : Optional[int] = [self.cls_token_id]
lowerCamelCase__ : int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
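# The method above builds the RoBERTa-style special-token layout:
#   single sequence: <s> A </s>
#   pair of sequences: <s> A </s></s> B </s>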
def a__ (self, lowerCamelCase_, lowerCamelCase_ = None, lowerCamelCase_ = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_, token_ids_a=lowerCamelCase_, already_has_special_tokens=lowerCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase_ )) + [1]
return [1] + ([0] * len(lowerCamelCase_ )) + [1, 1] + ([0] * len(lowerCamelCase_ )) + [1]
def a__ (self, lowerCamelCase_, lowerCamelCase_ = None ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = [self.sep_token_id]
lowerCamelCase__ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def a__ (self ):
'''simple docstring'''
return len(self.fairseq_ids_to_tokens )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = {self.convert_ids_to_tokens(lowerCamelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return self.sp_model.encode(lowerCamelCase_, out_type=lowerCamelCase_ )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return self.fairseq_ids_to_tokens[index]
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = ''.join(lowerCamelCase_ ).replace(lowerCamelCase_, ' ' ).strip()
return out_string
def a__ (self, lowerCamelCase_, lowerCamelCase_ = None ):
'''simple docstring'''
if not os.path.isdir(lowerCamelCase_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase__ : List[str] = os.path.join(
lowerCamelCase_, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
lowerCamelCase__ : str = os.path.join(
lowerCamelCase_, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['monolingual_vocab_file'], )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, lowerCamelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase_, 'wb' ) as fi:
lowerCamelCase__ : List[Any] = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase_ )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
lowerCamelCase_ ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file, lowerCamelCase_ )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(lowerCamelCase_, 'w', encoding='utf-8' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'''{str(lowerCamelCase_ )} \n''' )
return out_vocab_file, out_monolingual_vocab_file
| 364
|
"""simple docstring"""
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
A_ : Optional[Any] = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
A_ : List[Any] = {
# fairseq:
"wmt19-ru-en": {"length_penalty": 1.1},
"wmt19-en-ru": {"length_penalty": 1.15},
"wmt19-en-de": {"length_penalty": 1.0},
"wmt19-de-en": {"length_penalty": 1.1},
# allenai:
"wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
"wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
"wmt16-en-de-12-1": {"length_penalty": 0.8},
"wmt19-de-en-6-6-base": {"length_penalty": 0.6},
"wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
# this remaps the different models to their organization names
A_ : str = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
A_ : str = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
A_ : Optional[Any] = "allenai"
def lowerCamelCase_ ( _lowerCamelCase ):
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
lowerCamelCase__ : List[Any] = dict((re.sub(r'@@$' , '' , _lowerCamelCase ), v) if k.endswith('@@' ) else (re.sub(r'$' , '</w>' , _lowerCamelCase ), v) for k, v in d.items() )
lowerCamelCase__ : int = '<s> <pad> </s> <unk>'.split()
# restore the special tokens
for k in keep_keys:
del da[f'''{k}</w>''']
lowerCamelCase__ : List[str] = d[k] # restore
return da
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
# prep
assert os.path.exists(_lowerCamelCase )
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
print(f'''Writing results to {pytorch_dump_folder_path}''' )
# handle various types of models
lowerCamelCase__ : Optional[int] = basename(_lowerCamelCase )
lowerCamelCase__ : str = dirname(_lowerCamelCase )
lowerCamelCase__ : Any = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
lowerCamelCase__ : int = cls.hub_models()
lowerCamelCase__ : str = {'bpe': 'fastbpe', 'tokenizer': 'moses'}
lowerCamelCase__ : Optional[Any] = '.'
# note: the model dump is old; fairseq has since upgraded its model format
# and does a whole lot of rewrites and splits on the saved weights, so we
# can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(f'''using checkpoint {checkpoint_file}''' )
lowerCamelCase__ : Any = hub_utils.from_pretrained(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , archive_map=_lowerCamelCase , **_lowerCamelCase )
lowerCamelCase__ : List[str] = vars(chkpt['args']['model'] )
lowerCamelCase__ : Optional[Any] = args['source_lang']
lowerCamelCase__ : List[str] = args['target_lang']
lowerCamelCase__ : List[str] = dirname(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = basename(_lowerCamelCase )
# dicts
lowerCamelCase__ : Optional[Any] = os.path.join(_lowerCamelCase , f'''dict.{src_lang}.txt''' )
lowerCamelCase__ : Optional[Any] = os.path.join(_lowerCamelCase , f'''dict.{tgt_lang}.txt''' )
lowerCamelCase__ : Dict = Dictionary.load(_lowerCamelCase )
lowerCamelCase__ : List[Any] = rewrite_dict_keys(src_dict.indices )
lowerCamelCase__ : int = len(_lowerCamelCase )
lowerCamelCase__ : List[Any] = os.path.join(_lowerCamelCase , 'vocab-src.json' )
print(f'''Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowerCamelCase , ensure_ascii=_lowerCamelCase , indent=_lowerCamelCase ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
lowerCamelCase__ : Optional[int] = True
for k in src_vocab.keys():
if not k.islower():
lowerCamelCase__ : int = False
break
lowerCamelCase__ : str = Dictionary.load(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = rewrite_dict_keys(tgt_dict.indices )
lowerCamelCase__ : Optional[Any] = len(_lowerCamelCase )
lowerCamelCase__ : Dict = os.path.join(_lowerCamelCase , 'vocab-tgt.json' )
print(f'''Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowerCamelCase , ensure_ascii=_lowerCamelCase , indent=_lowerCamelCase ) )
# merges_file (bpecodes)
lowerCamelCase__ : List[Any] = os.path.join(_lowerCamelCase , VOCAB_FILES_NAMES['merges_file'] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
lowerCamelCase__ : Optional[int] = os.path.join(_lowerCamelCase , _lowerCamelCase )
if os.path.exists(_lowerCamelCase ):
break
with open(_lowerCamelCase , encoding='utf-8' ) as fin:
lowerCamelCase__ : Union[str, Any] = fin.read()
lowerCamelCase__ : Any = re.sub(r' \d+$' , '' , _lowerCamelCase , 0 , re.M ) # remove frequency number
print(f'''Generating {merges_file}''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as fout:
fout.write(_lowerCamelCase )
# model config
lowerCamelCase__ : Dict = os.path.join(_lowerCamelCase , 'config.json' )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", f'''need to extend tokenizer to support bpe={args['bpe']}'''
assert args["tokenizer"] == "moses", f'''need to extend tokenizer to support bpe={args['tokenizer']}'''
lowerCamelCase__ : Optional[int] = {
'architectures': ['FSMTForConditionalGeneration'],
'model_type': 'fsmt',
'activation_dropout': args['activation_dropout'],
'activation_function': 'relu',
'attention_dropout': args['attention_dropout'],
'd_model': args['decoder_embed_dim'],
'dropout': args['dropout'],
'init_std': 0.02,
'max_position_embeddings': args['max_source_positions'],
'num_hidden_layers': args['encoder_layers'],
'src_vocab_size': src_vocab_size,
'tgt_vocab_size': tgt_vocab_size,
'langs': [src_lang, tgt_lang],
'encoder_attention_heads': args['encoder_attention_heads'],
'encoder_ffn_dim': args['encoder_ffn_embed_dim'],
'encoder_layerdrop': args['encoder_layerdrop'],
'encoder_layers': args['encoder_layers'],
'decoder_attention_heads': args['decoder_attention_heads'],
'decoder_ffn_dim': args['decoder_ffn_embed_dim'],
'decoder_layerdrop': args['decoder_layerdrop'],
'decoder_layers': args['decoder_layers'],
'bos_token_id': 0,
'pad_token_id': 1,
'eos_token_id': 2,
'is_encoder_decoder': True,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_all_embeddings'],
}
# good hparam defaults to start with
lowerCamelCase__ : str = 5
lowerCamelCase__ : Tuple = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
lowerCamelCase__ : List[str] = best_score_hparams[model_dir]['length_penalty']
else:
lowerCamelCase__ : List[Any] = 1.0
print(f'''Generating {fsmt_model_config_file}''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowerCamelCase , ensure_ascii=_lowerCamelCase , indent=_lowerCamelCase ) )
# tokenizer config
lowerCamelCase__ : Dict = os.path.join(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : int = {
'langs': [src_lang, tgt_lang],
'model_max_length': 1024,
'do_lower_case': do_lower_case,
}
print(f'''Generating {fsmt_tokenizer_config_file}''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowerCamelCase , ensure_ascii=_lowerCamelCase , indent=_lowerCamelCase ) )
# model
lowerCamelCase__ : List[str] = chkpt['models'][0]
lowerCamelCase__ : Optional[Any] = model.state_dict()
# rename keys to start with 'model.'
lowerCamelCase__ : str = OrderedDict(('model.' + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
lowerCamelCase__ : int = [
'model.model',
'model.encoder.version',
'model.decoder.version',
'model.encoder_embed_tokens.weight',
'model.decoder_embed_tokens.weight',
'model.encoder.embed_positions._float_tensor',
'model.decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
model_state_dict.pop(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : Any = FSMTConfig.from_pretrained(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = FSMTForConditionalGeneration(_lowerCamelCase )
# check that it loads ok
model_new.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
# save
lowerCamelCase__ : List[Any] = os.path.join(_lowerCamelCase , _lowerCamelCase )
print(f'''Generating {pytorch_weights_dump_path}''' )
torch.save(_lowerCamelCase , _lowerCamelCase )
print('Conversion is done!' )
print('\nLast step is to upload the files to s3' )
print(f'''cd {data_root}''' )
print(f'''transformers-cli upload {model_dir}''' )
if __name__ == "__main__":
A_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fsmt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
A_ : Dict = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 316
| 0
|
"""simple docstring"""
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
from .unet_ad import UNetaDModel
from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 365
|
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class a_ ( snake_case_ ):
'''simple docstring'''
@staticmethod
@abstractmethod
def a__ (lowerCamelCase_ ):
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def a__ (self ):
'''simple docstring'''
raise NotImplementedError()
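# Illustrative sketch (assumption, not part of the original file): a concrete
# command subclasses the ABC above and registers itself on the shared parser.
# class HelloCommand(BaseTransformersCLICommand):  # hypothetical subclass
#     @staticmethod
#     def register_subcommand(parser: ArgumentParser):
#         sub = parser.add_parser("hello")
#         sub.set_defaults(func=lambda args: HelloCommand())
#     def run(self):
#         print("hello")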
| 316
| 0
|
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , ):
lowerCamelCase__ : List[str] = cipher_alphabet or [chr(_lowerCamelCase ) for i in range(97 , 123 )]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
lowerCamelCase__ : Any = {
'a': 0.08_497,
'b': 0.01_492,
'c': 0.02_202,
'd': 0.04_253,
'e': 0.11_162,
'f': 0.02_228,
'g': 0.02_015,
'h': 0.06_094,
'i': 0.07_546,
'j': 0.00_153,
'k': 0.01_292,
'l': 0.04_025,
'm': 0.02_406,
'n': 0.06_749,
'o': 0.07_507,
'p': 0.01_929,
'q': 0.00_095,
'r': 0.07_587,
's': 0.06_327,
't': 0.09_356,
'u': 0.02_758,
'v': 0.00_978,
'w': 0.02_560,
'x': 0.00_150,
'y': 0.01_994,
'z': 0.00_077,
}
else:
# Custom frequencies dictionary
lowerCamelCase__ : Union[str, Any] = frequencies_dict
if not case_sensitive:
lowerCamelCase__ : Any = ciphertext.lower()
# Chi squared statistic values
lowerCamelCase__ : dict[int, tuple[float, str]] = {}
# cycle through all of the shifts
for shift in range(len(_lowerCamelCase ) ):
lowerCamelCase__ : List[Any] = ''
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
lowerCamelCase__ : List[str] = (alphabet_letters.index(letter.lower() ) - shift) % len(
_lowerCamelCase )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
lowerCamelCase__ : int = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
lowerCamelCase__ : Union[str, Any] = letter.lower()
if letter in frequencies:
# Get the amount of times the letter occurs in the message
lowerCamelCase__ : Optional[int] = decrypted_with_shift.lower().count(_lowerCamelCase )
# Get the expected amount of times the letter should appear based
# on letter frequencies
lowerCamelCase__ : Tuple = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
lowerCamelCase__ : Optional[int] = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
# Get the amount of times the letter occurs in the message
lowerCamelCase__ : Union[str, Any] = decrypted_with_shift.count(_lowerCamelCase )
# Get the expected amount of times the letter should appear based
# on letter frequencies
lowerCamelCase__ : Optional[int] = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
lowerCamelCase__ : Optional[int] = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
lowerCamelCase__ : Optional[Any] = (
chi_squared_statistic,
decrypted_with_shift,
)
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
def chi_squared_statistic_values_sorting_key(_lowerCamelCase ) -> tuple[float, str]:
return chi_squared_statistic_values[key]
lowerCamelCase__ : int = min(
_lowerCamelCase , key=_lowerCamelCase , )
# Get all the data from the most likely cipher (key, decoded message)
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
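# Note on the statistic above: each candidate shift is scored with the
# goodness-of-fit term
#     chi_letter_value = (occurrences - expected) ** 2 / expected
# where expected = frequencies[letter] * occurrences as coded above; the shift
# with the smallest summed statistic is returned as the most English-like
# decryption.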
| 366
|
"""simple docstring"""
import re
def lowerCamelCase_ ( _lowerCamelCase ):
if len(re.findall('[ATCG]' , _lowerCamelCase ) ) != len(_lowerCamelCase ):
raise ValueError('Invalid Strand' )
return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) )
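# Example (derived from the translation table above): the complement of "ATCG"
# is "TAGC"; any character outside "ATCG" raises ValueError.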
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316
| 0
|
"""simple docstring"""
A_ : int = {
"A": ["B", "C", "E"],
"B": ["A", "D", "E"],
"C": ["A", "F", "G"],
"D": ["B"],
"E": ["A", "B", "D"],
"F": ["C"],
"G": ["C"],
}
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Any = set()
# keep track of all the paths to be checked
lowerCamelCase__ : Dict = [[start]]
# return path if start is goal
if start == goal:
return [start]
# keeps looping until all possible paths have been checked
while queue:
# pop the first path from the queue
lowerCamelCase__ : List[Any] = queue.pop(0 )
# get the last node from the path
lowerCamelCase__ : Optional[Any] = path[-1]
if node not in explored:
lowerCamelCase__ : Any = graph[node]
# go through all neighbour nodes, construct a new path and
# push it into the queue
for neighbour in neighbours:
lowerCamelCase__ : str = list(_lowerCamelCase )
new_path.append(_lowerCamelCase )
queue.append(_lowerCamelCase )
# return path if neighbour is goal
if neighbour == goal:
return new_path
# mark node as explored
explored.add(_lowerCamelCase )
# in case there's no path between the 2 nodes
return []
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if not graph or start not in graph or target not in graph:
return -1
if start == target:
return 0
lowerCamelCase__ : Optional[int] = [start]
lowerCamelCase__ : str = set(_lowerCamelCase )
# Keep tab on distances from `start` node.
lowerCamelCase__ : Dict = {start: 0, target: -1}
while queue:
lowerCamelCase__ : List[Any] = queue.pop(0 )
if node == target:
lowerCamelCase__ : int = (
dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
)
for adjacent in graph[node]:
if adjacent not in visited:
visited.add(_lowerCamelCase )
queue.append(_lowerCamelCase )
lowerCamelCase__ : int = dist[node] + 1
return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, "G", "D")) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, "G", "D")) # returns 4
| 367
|
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Any = s.rsplit(_lowerCamelCase , _lowerCamelCase )
return new.join(_lowerCamelCase )
def lowerCamelCase_ ( _lowerCamelCase ):
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() )
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : List[Any] = {}
lowerCamelCase__ : Any = ['group_1', 'group_2', 'group_3', 'group_4']
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
lowerCamelCase__ : Union[str, Any] = key.replace(f'''{group_key}.''' , f'''{group_key}.group.''' )
if "res_path" in key:
lowerCamelCase__ : Dict = key.replace('res_path.' , 'res_path.path.' )
if key.endswith('.w' ):
lowerCamelCase__ : str = rreplace(_lowerCamelCase , '.w' , '.weight' , 1 )
if key.endswith('.b' ):
lowerCamelCase__ : Optional[Any] = rreplace(_lowerCamelCase , '.b' , '.bias' , 1 )
lowerCamelCase__ : int = value.float()
return upgrade
@torch.no_grad()
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=True ):
from dall_e import Encoder
lowerCamelCase__ : List[str] = Encoder()
if os.path.exists(_lowerCamelCase ):
lowerCamelCase__ : Optional[int] = torch.load(_lowerCamelCase )
else:
lowerCamelCase__ : List[Any] = torch.hub.load_state_dict_from_url(_lowerCamelCase )
if isinstance(_lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : List[Any] = ckpt.state_dict()
encoder.load_state_dict(_lowerCamelCase )
if config_path is not None:
lowerCamelCase__ : Union[str, Any] = FlavaImageCodebookConfig.from_pretrained(_lowerCamelCase )
else:
lowerCamelCase__ : Dict = FlavaImageCodebookConfig()
lowerCamelCase__ : Tuple = FlavaImageCodebook(_lowerCamelCase ).eval()
lowerCamelCase__ : List[str] = encoder.state_dict()
lowerCamelCase__ : Any = upgrade_state_dict(_lowerCamelCase )
hf_model.load_state_dict(_lowerCamelCase )
lowerCamelCase__ : Optional[Any] = hf_model.state_dict()
lowerCamelCase__ : Optional[int] = count_parameters(_lowerCamelCase )
lowerCamelCase__ : Optional[int] = count_parameters(_lowerCamelCase )
assert torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-3 )
if save_checkpoint:
hf_model.save_pretrained(_lowerCamelCase )
else:
return hf_state_dict
if __name__ == "__main__":
A_ : Tuple = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
A_ : str = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 316
| 0
|
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase = 100_0000 ):
lowerCamelCase__ : Optional[int] = 1
lowerCamelCase__ : List[Any] = 1
lowerCamelCase__ : List[Any] = {1: 1}
for inputa in range(2 , _lowerCamelCase ):
lowerCamelCase__ : int = 0
lowerCamelCase__ : Dict = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
lowerCamelCase__ : Union[str, Any] = (3 * number) + 1
counter += 1
if inputa not in counters:
lowerCamelCase__ : int = counter
if counter > pre_counter:
lowerCamelCase__ : List[str] = inputa
lowerCamelCase__ : str = counter
return largest_number
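# Sanity check (well-known Project Euler 14 result): the longest Collatz chain
# for a starting number below one million begins at 837799, i.e.
# solution(1000000) == 837799.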
if __name__ == "__main__":
print(solution(int(input().strip())))
| 368
|
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=3_0, lowerCamelCase_=2, lowerCamelCase_=3, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=3_2, lowerCamelCase_=2, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=1_0, lowerCamelCase_=0.02, lowerCamelCase_=3, lowerCamelCase_=None, lowerCamelCase_=2, ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = parent
lowerCamelCase__ : int = batch_size
lowerCamelCase__ : Dict = image_size
lowerCamelCase__ : List[str] = patch_size
lowerCamelCase__ : Union[str, Any] = num_channels
lowerCamelCase__ : str = is_training
lowerCamelCase__ : Any = use_labels
lowerCamelCase__ : Tuple = hidden_size
lowerCamelCase__ : str = num_hidden_layers
lowerCamelCase__ : Dict = num_attention_heads
lowerCamelCase__ : Union[str, Any] = intermediate_size
lowerCamelCase__ : Any = hidden_act
lowerCamelCase__ : Dict = hidden_dropout_prob
lowerCamelCase__ : Optional[Any] = attention_probs_dropout_prob
lowerCamelCase__ : List[Any] = type_sequence_label_size
lowerCamelCase__ : Optional[int] = initializer_range
lowerCamelCase__ : Tuple = scope
lowerCamelCase__ : List[str] = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
lowerCamelCase__ : str = (image_size // patch_size) ** 2
lowerCamelCase__ : Optional[int] = num_patches + 2
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ : Tuple = None
if self.use_labels:
lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : List[str] = self.get_config()
return config, pixel_values, labels
def a__ (self ):
'''simple docstring'''
return DeiTConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=lowerCamelCase_, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, )
    def create_and_check_model(self, config, pixel_values, labels ):
        '''simple docstring'''
        model = TFDeiTModel(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels ):
        '''simple docstring'''
        model = TFDeiTForMaskedImageModeling(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size) )
    def create_and_check_for_image_classification(self, config, pixel_values, labels ):
        '''simple docstring'''
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config )
        result = model(pixel_values, labels=labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values, labels=labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common(self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class TFDeiTModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFDeiTModel,
'image-classification': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp(self ):
'''simple docstring'''
        self.model_tester = TFDeiTModelTester(self )
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=3_7 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='DeiT does not use inputs_embeds' )
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense ) )
def a__ (self ):
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names )
def a__ (self ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def a__ (self ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )
def a__ (self ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False ):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels )
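        # The distilled "with teacher" head is inference-only: its call() accepts no labels, so they are removed below.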
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def a__ (self ):
'''simple docstring'''
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ):
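    # The standard COCO test fixture: an image of two cats lying on a couch.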
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class a_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_image_processor(self ):
'''simple docstring'''
return (
DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' )
if is_vision_available()
else None
)
@slow
def a__ (self ):
'''simple docstring'''
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='tf' )
        # forward pass
        outputs = model(**inputs )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape, expected_shape )
        expected_slice = tf.constant([-1.0_266, 0.1_912, -1.2_861] )
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4 ) )
| 316
| 0
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class a_ ( ProcessorMixin ):
'''simple docstring'''
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'CLIPImageProcessor'
    tokenizer_class = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
    def __init__(self, image_processor=None, tokenizer=None, **kwargs ):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.', FutureWarning, )
            feature_extractor = kwargs.pop('feature_extractor' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(image_processor, tokenizer )
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs ):
        '''simple docstring'''
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.' )
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs )
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs )
        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ), tensor_type=return_tensors )
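    # Usage sketch (illustrative): processor(text=['a photo of a cat'], images=img, return_tensors='pt')
    # yields a BatchEncoding carrying input_ids, attention_mask and pixel_values.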
    def batch_decode(self, *args, **kwargs ):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args, **kwargs )
    def decode(self, *args, **kwargs ):
        '''simple docstring'''
        return self.tokenizer.decode(*args, **kwargs )
@property
    def model_input_names(self ):
        '''simple docstring'''
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 369
|
"""simple docstring"""
def add(first: int, second: int) -> int:
    # Carry-free addition: XOR sums the bits, AND extracts the carries, and each
    # carry is shifted left one place and folded back in until none remain.
    while second != 0:
        carry = first & second
        first ^= second
        second = carry << 1
    return first
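# Illustrative checks (non-negative operands): add(3, 5) == 8 and add(13, 5) == 18.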
if __name__ == "__main__":
import doctest
doctest.testmod()
    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
print(f"{add(first, second) = }")
| 316
| 0
|
"""simple docstring"""
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    '''simple docstring'''
    def __init__(self, parent, batch_size=2, image_size=3_2, patch_size=1_6, num_channels=3, is_training=True, use_labels=True, hidden_size=3_2, num_hidden_layers=4, backbone_out_indices=[0, 1, 2, 3], num_attention_heads=4, intermediate_size=3_7, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, num_labels=3, backbone_featmap_shape=[1, 3_8_4, 2_4, 2_4], is_hybrid=True, scope=None, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
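        # e.g. with the defaults image_size=32 and patch_size=16: (32 // 16) ** 2 = 4 patches, so seq_length = 5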
    def prepare_config_and_inputs(self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self ):
        '''simple docstring'''
        # BiT (ResNet-v2) style backbone settings used by the hybrid DPT variant.
        backbone_config = {
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
'hidden_sizes': [9_6, 1_9_2, 3_8_4, 7_6_8],
'num_groups': 2,
}
        return DPTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, backbone_out_indices=self.backbone_out_indices, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, is_hybrid=self.is_hybrid, backbone_config=backbone_config, backbone_featmap_shape=self.backbone_featmap_shape, )
    def create_and_check_model(self, config, pixel_values, labels ):
        '''simple docstring'''
        model = DPTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_depth_estimation(self, config, pixel_values, labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size) )
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values, labels=labels )
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) )
    def prepare_config_and_inputs_for_common(self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class DPTModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
{
'depth-estimation': DPTForDepthEstimation,
'feature-extraction': DPTModel,
'image-segmentation': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self ):
'''simple docstring'''
        self.model_tester = DPTModelTester(self )
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=3_7 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='DPT does not use inputs_embeds' )
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[Any] = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
lowerCamelCase__ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_, nn.Linear ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Tuple = model_class(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : List[str] = [*signature.parameters.keys()]
lowerCamelCase__ : Union[str, Any] = ['pixel_values']
self.assertListEqual(arg_names[:1], lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def a__ (self ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs )
def a__ (self ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
def a__ (self ):
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
def a__ (self ):
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING ) or not model_class.supports_gradient_checkpointing:
                continue
            model = model_class(config )
            model.to(torch_device )
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
def a__ (self ):
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f'''{name}.{key}''' for key in module.state_dict().keys()]
                    break
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f'''Parameter {name} of model {model_class} seems not properly initialized''', )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def a__ (self ):
'''simple docstring'''
pass
@slow
def a__ (self ):
'''simple docstring'''
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def a__ (self ):
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = 'add'
        with self.assertRaises(ValueError ):
            model = DPTForDepthEstimation(config )
def prepare_img( ):
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
@slow
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
        image_processor = DPTImageProcessor.from_pretrained('Intel/dpt-hybrid-midas' )
        model = DPTForDepthEstimation.from_pretrained('Intel/dpt-hybrid-midas' ).to(torch_device )
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        predicted_depth = outputs.predicted_depth
        # verify the predicted depth
        expected_shape = torch.Size((1, 3_8_4, 3_8_4) )
        self.assertEqual(predicted_depth.shape, expected_shape )
        expected_slice = torch.tensor(
            [[[5.6_437, 5.6_146, 5.6_511], [5.4_371, 5.5_649, 5.5_958], [5.5_215, 5.5_184, 5.5_293]]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_0_0, expected_slice, atol=1e-4 ) )
| 370
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 316
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
"tokenization_luke": ["LukeTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_luke"] = [
"LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
"LukeForEntityClassification",
"LukeForEntityPairClassification",
"LukeForEntitySpanClassification",
"LukeForMultipleChoice",
"LukeForQuestionAnswering",
"LukeForSequenceClassification",
"LukeForTokenClassification",
"LukeForMaskedLM",
"LukeModel",
"LukePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 371
|
"""simple docstring"""
import numpy as np
def lowerCamelCase_ ( vector ):
    return (2 / (1 + np.exp(-2 * vector ))) - 1
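# Algebraically this equals np.tanh(vector), since 2*sigmoid(2x) - 1 == tanh(x);
# e.g. lowerCamelCase_(np.array([0.0, 1.0])) matches np.tanh([0.0, 1.0]).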
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/mbart-large-en-ro": 10_24,
"facebook/mbart-large-cc25": 10_24,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class MBartTokenizerFast( PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = MBartTokenizer
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, **kwargs, ):
        '''simple docstring'''
        # the mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False ) if isinstance(mask_token, str ) else mask_token
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )
        self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code ) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else 'en_XX'
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang )
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
    @property
    def src_lang(self ):
        '''simple docstring'''
        return self._src_lang
    @src_lang.setter
    def src_lang(self, new_src_lang ):
        '''simple docstring'''
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def a__ (self, token_ids_0, token_ids_1 = None ):
        '''simple docstring'''
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def a__ (self, token_ids_0, token_ids_1 = None ):
        '''simple docstring'''
        # mBART does not use token type ids, so the mask is all zeros.
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def a__ (self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs ):
        '''simple docstring'''
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs['forced_bos_token_id'] = tgt_lang_id
        return inputs
    def a__ (self, src_texts, src_lang = "en_XX", tgt_texts = None, tgt_lang = "ro_RO", **kwargs, ):
        '''simple docstring'''
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs )
    def _switch_to_input_mode(self ):
        '''simple docstring'''
        return self.set_src_lang_special_tokens(self.src_lang )
    def _switch_to_target_mode(self ):
        '''simple docstring'''
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens(self, src_lang ):
        '''simple docstring'''
        # Source sequences end with "</s> <src_lang_code>"; there is no prefix.
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang )
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str, pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens ) ), )
    def set_tgt_lang_special_tokens(self, tgt_lang ):
        '''simple docstring'''
        self.cur_lang_code = self.convert_tokens_to_ids(tgt_lang )
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str, pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens ) ), )
    def save_vocabulary(self, save_directory, filename_prefix = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory.''' )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file, out_vocab_file )
return (out_vocab_file,)
| 350
|
"""simple docstring"""
print((lambda quine: quine % quine)("print((lambda quine: quine %% quine)(%r))"))
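# The %r placeholder re-embeds the format string's own repr, so the printed text
# reproduces the program's source (modulo quote style).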
| 316
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
A_ : int = logging.get_logger(__name__)
A_ : Tuple = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class LongformerConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = 'longformer'
    def __init__(self, attention_window = 5_1_2, sep_token_id = 2, pad_token_id = 1, bos_token_id = 0, eos_token_id = 2, vocab_size = 3_0_5_2_2, hidden_size = 7_6_8, num_hidden_layers = 1_2, num_attention_heads = 1_2, intermediate_size = 3_0_7_2, hidden_act = "gelu", hidden_dropout_prob = 0.1, attention_probs_dropout_prob = 0.1, max_position_embeddings = 5_1_2, type_vocab_size = 2, initializer_range = 0.02, layer_norm_eps = 1e-12, onnx_export = False, **kwargs, ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, **kwargs )
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig( OnnxConfig ):
    '''simple docstring'''
    def __init__(self, config, task = "default", patching_specs = None ):
        '''simple docstring'''
        super().__init__(config, task, patching_specs )
        config.onnx_export = True
@property
    def inputs(self ):
'''simple docstring'''
if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
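        # Unlike most transformer ONNX configs, Longformer also exposes global_attention_mask as a model input.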
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('global_attention_mask', dynamic_axis),
] )
@property
    def outputs(self ):
'''simple docstring'''
        outputs = super().outputs
        if self.task == "default":
            outputs['pooler_output'] = {0: 'batch'}
return outputs
@property
    def atol_for_validation(self ):
'''simple docstring'''
return 1e-4
@property
    def default_onnx_opset(self ):
'''simple docstring'''
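        # opset >= 14 is needed to support the tril operator used by Longformer.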
return max(super().default_onnx_opset, 1_4 )
    def generate_dummy_inputs(self, preprocessor, batch_size = -1, seq_length = -1, is_pair = False, framework = None, ):
        '''simple docstring'''
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        import torch
        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs['global_attention_mask'] = torch.zeros_like(inputs['input_ids'] )
        # make every second token global
        inputs['global_attention_mask'][:, ::2] = 1
return inputs
| 351
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 316
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Optional[int] = logging.get_logger(__name__)
A_ : Union[str, Any] = {
"alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}
class MgpstrConfig( PretrainedConfig ):
    model_type = 'mgp-str'
    def __init__(self, image_size=[3_2, 1_2_8], patch_size=4, num_channels=3, max_token_length=2_7, num_character_labels=3_8, num_bpe_labels=5_0_2_5_7, num_wordpiece_labels=3_0_5_2_2, hidden_size=7_6_8, num_hidden_layers=1_2, num_attention_heads=1_2, mlp_ratio=4.0, qkv_bias=True, distilled=False, layer_norm_eps=1e-5, drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0, output_aa_attentions=False, initializer_range=0.02, **kwargs, ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_aa_attentions = output_aa_attentions
        self.initializer_range = initializer_range
| 352
|
"""simple docstring"""
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig( datasets.BuilderConfig ):
'''simple docstring'''
    features: Optional[datasets.Features] = None
def _generate_iterable_examples( df , partition_order , ):
import pyspark
def generate_fn():
        df_with_partition_id = df.select('*' , pyspark.sql.functions.spark_partition_id().alias('part_id' ) )
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select('*' ).where(f'''part_id = {partition_id}''' ).drop('part_id' )
            rows = partition_df.collect()
            row_id = 0
for row in rows:
yield f'''{partition_id}_{row_id}''', row.asDict()
row_id += 1
return generate_fn
class SparkExamplesIterable( _BaseExamplesIterable ):
'''simple docstring'''
    def __init__(self, df, partition_order=None, ):
        '''simple docstring'''
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions() )
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order )
def __iter__(self ):
'''simple docstring'''
yield from self.generate_examples_fn()
    def a__ (self, generator ):
        '''simple docstring'''
        # Shuffle at partition granularity: rows within a partition keep their order.
        partition_order = list(range(self.df.rdd.getNumPartitions() ) )
        generator.shuffle(partition_order )
        return SparkExamplesIterable(self.df, partition_order=partition_order )
    def a__ (self, worker_id, num_workers ):
        '''simple docstring'''
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers )
        return SparkExamplesIterable(self.df, partition_order=partition_order )
@property
def a__ (self ):
'''simple docstring'''
return len(self.partition_order )
class a_ ( datasets.DatasetBuilder ):
'''simple docstring'''
    BUILDER_CONFIG_CLASS = SparkConfig
    def __init__(self, df, cache_dir = None, working_dir = None, **config_kwargs, ):
        '''simple docstring'''
        import pyspark
        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir, config_name=str(self.df.semanticHash() ), **config_kwargs, )
    def _validate_cache_dir(self ):
'''simple docstring'''
def create_cache_and_write_probe(lowerCamelCase_ ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir, exist_ok=lowerCamelCase_ )
            probe_file = os.path.join(self._cache_dir, 'fs_test' + uuid.uuid4().hex )
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, 'a' )
return [probe_file]
if self._spark.conf.get('spark.master', '' ).startswith('local' ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1 ), 1 ).mapPartitions(create_cache_and_write_probe ).collect()
            )
if os.path.isfile(probe[0] ):
return
raise ValueError(
'When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir' )
    def _info(self ):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
    def _split_generators(self, dl_manager ):
'''simple docstring'''
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
    def _repartition_df_if_needed(self, max_shard_size ):
'''simple docstring'''
import pyspark
        def get_arrow_batch_size(it ):
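            # Report only the byte size of each Arrow batch; used below to estimate the average bytes per row.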
for batch in it:
yield pa.RecordBatch.from_pydict({'batch_bytes': [batch.nbytes]} )
        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 1_0_0 else 1_0_0
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows )
            .repartition(1 )
            .mapInArrow(get_arrow_batch_size, 'batch_bytes: long' )
            .agg(pyspark.sql.functions.sum('batch_bytes' ).alias('sample_bytes' ) )
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size ) )
            self.df = self.df.repartition(new_num_partitions )
    def _prepare_split_single(self, fpath, file_format, max_shard_size, ):
        '''simple docstring'''
        import pyspark
        writer_class = ParquetWriter if file_format == 'parquet' else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath ) ) if self._working_dir else fpath
        embed_local_files = file_format == 'parquet'
        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options
        def write_arrow(it ):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None )
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]], names=['task_id', 'num_examples', 'num_bytes'], )
            shard_id = 0
            writer = writer_class(
                features=features, path=working_fpath.replace('SSSSS', f'''{shard_id:05d}''' ).replace('TTTTT', f'''{task_id:05d}''' ), writer_batch_size=writer_batch_size, storage_options=storage_options, embed_local_files=embed_local_files, )
            table = pa.Table.from_batches([first_batch] )
            writer.write_table(table )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples , num_bytes = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]], names=['task_id', 'num_examples', 'num_bytes'], )
shard_id += 1
                    writer = writer_class(
                        features=writer._features, path=working_fpath.replace('SSSSS', f'''{shard_id:05d}''' ).replace('TTTTT', f'''{task_id:05d}''' ), writer_batch_size=writer_batch_size, storage_options=storage_options, embed_local_files=embed_local_files, )
                    table = pa.Table.from_batches([batch] )
                    writer.write_table(table )
if writer._num_bytes > 0:
                num_examples , num_bytes = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]], names=['task_id', 'num_examples', 'num_bytes'], )
if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath ) ):
                    dest = os.path.join(os.path.dirname(fpath ), os.path.basename(file ) )
                    shutil.move(file, dest )
        stats = (
            self.df.mapInArrow(write_arrow, 'task_id: long, num_examples: long, num_bytes: long' )
.groupBy('task_id' )
.agg(
pyspark.sql.functions.sum('num_examples' ).alias('total_num_examples' ), pyspark.sql.functions.sum('num_bytes' ).alias('total_num_bytes' ), pyspark.sql.functions.count('num_bytes' ).alias('num_shards' ), pyspark.sql.functions.collect_list('num_examples' ).alias('shard_lengths' ), )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
    def _prepare_split(self, split_generator, file_format = "arrow", max_shard_size = None, num_proc = None, **kwargs, ):
        '''simple docstring'''
        self._validate_cache_dir()
        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
        self._repartition_df_if_needed(max_shard_size )
        is_local = not is_remote_filesystem(self._fs )
        path_join = os.path.join if is_local else posixpath.join
        SUFFIX = '-TTTTT-SSSSS-of-NNNNN'
        fname = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
        fpath = path_join(self._output_dir, fname )
        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []
        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size ):
            (num_examples, num_bytes, num_shards, shard_lengths) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards) )
                all_shard_lengths.extend(shard_lengths )
        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes
# should rename everything at the end
logger.debug(f'''Renaming {total_shards} shards.''' )
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
            fs = self._fs
# use the -SSSSS-of-NNNNN pattern
            def _rename_shard(
                task_id, shard_id, global_shard_id, ):
                rename(
                    fs, fpath.replace('SSSSS', f'''{shard_id:05d}''' ).replace('TTTTT', f'''{task_id:05d}''' ), fpath.replace('TTTTT-SSSSS', f'''{global_shard_id:05d}''' ).replace('NNNNN', f'''{total_shards:05d}''' ), )
            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards ) ):
                task_id , num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards ):
                    args.append([task_id, shard_id, global_shard_id] )
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args ) ).map(lambda args : _rename_shard(*args ) ).collect()
else:
# don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace('SSSSS', f'''{shard_id:05d}''' ).replace('TTTTT', f'''{task_id:05d}''' ), fpath.replace(SUFFIX, '' ), )
    def _get_examples_iterable_for_split(self, split_generator, ):
'''simple docstring'''
return SparkExamplesIterable(self.df )
| 316
| 0
|
"""simple docstring"""
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTaTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = GPTaTokenizer
    rust_tokenizer_class = GPTaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {'add_prefix_space': True}
    test_seq2seq = False
    def setUp(self ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
'<|endoftext|>',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab ) ) ) )
        # '\u0120' is the GPT-2 byte-level marker for a leading space; each merge rule joins one symbol pair.
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file, 'w', encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
        with open(self.merges_file, 'w', encoding='utf-8' ) as fp:
            fp.write('\n'.join(merges ) )
    def get_tokenizer(self, **kwargs ):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return GPTaTokenizer.from_pretrained(self.tmpdirname, **kwargs )
    def get_rust_tokenizer(self, **kwargs ):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return GPTaTokenizerFast.from_pretrained(self.tmpdirname, **kwargs )
    def get_input_output_texts(self, tokenizer ):
        '''simple docstring'''
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
def a__ (self ):
'''simple docstring'''
        tokenizer = GPTaTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map )
        text = 'lower newer'
        bpe_tokens = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text, add_prefix_space=True )
        self.assertListEqual(tokens, bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ), input_bpe_tokens )
def a__ (self ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True )
        sequence = 'lower newer'
        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens, rust_tokens )
        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True )
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False )
        self.assertListEqual(ids, rust_ids )
        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True )
        ids = tokenizer.encode(sequence, add_prefix_space=True )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids, rust_ids )
        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens ), input_bpe_tokens )
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
pass
    def a__ (self, max_length=1_5 ):
        '''simple docstring'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs )
                # Simple input
                s = 'This is a simple input'
                sa = ['This is a simple input 1', 'This is a simple input 2']
                p = ('This is a simple input', 'This is a pair')
                pa = [
                    ('This is a simple input 1', 'This is a simple input 2'),
                    ('This is a simple pair 1', 'This is a simple pair 2'),
                ]
                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding='max_length' )
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding='max_length' )
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, sa, max_length=max_length, padding='max_length', )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding='max_length' )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding='max_length' )
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, pa, max_length=max_length, padding='max_length', )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = GPTaTokenizer.from_pretrained(self.tmpdirname, pad_token='<pad>' )
# Simple input
lowerCamelCase__ : Tuple = 'This is a simple input'
lowerCamelCase__ : str = ['This is a simple input looooooooong', 'This is a simple input']
lowerCamelCase__ : int = ('This is a simple input', 'This is a pair')
lowerCamelCase__ : Dict = [
('This is a simple input loooooong', 'This is a simple input'),
('This is a simple pair loooooong', 'This is a simple pair'),
]
lowerCamelCase__ : Optional[int] = tokenizer.pad_token_id
lowerCamelCase__ : Any = tokenizer(lowerCamelCase_, padding='max_length', max_length=3_0, return_tensors='np' )
lowerCamelCase__ : Union[str, Any] = tokenizer(lowerCamelCase_, padding=lowerCamelCase_, truncate=lowerCamelCase_, return_tensors='np' )
lowerCamelCase__ : str = tokenizer(*lowerCamelCase_, padding='max_length', max_length=6_0, return_tensors='np' )
lowerCamelCase__ : int = tokenizer(lowerCamelCase_, padding=lowerCamelCase_, truncate=lowerCamelCase_, return_tensors='np' )
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1], 3_0 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1], 3_3 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1], 6_0 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1], 5_2 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = '$$$'
lowerCamelCase__ : List[str] = GPTaTokenizer.from_pretrained(self.tmpdirname, bos_token=lowerCamelCase_, add_bos_token=lowerCamelCase_ )
lowerCamelCase__ : str = 'This is a simple input'
lowerCamelCase__ : int = ['This is a simple input 1', 'This is a simple input 2']
lowerCamelCase__ : List[str] = tokenizer.bos_token_id
lowerCamelCase__ : List[str] = tokenizer(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = tokenizer(lowerCamelCase_ )
self.assertEqual(out_s.input_ids[0], lowerCamelCase_ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
lowerCamelCase__ : int = tokenizer.decode(out_s.input_ids )
lowerCamelCase__ : Optional[int] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0], lowerCamelCase_ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = [self.get_tokenizer(do_lower_case=lowerCamelCase_, add_bos_token=lowerCamelCase_ )]
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
lowerCamelCase__ : Optional[Any] = 'Encode this.'
lowerCamelCase__ : Optional[Any] = 'This one too please.'
lowerCamelCase__ : List[str] = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
encoded_sequence += tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = tokenizer.encode_plus(
lowerCamelCase_, lowerCamelCase_, add_special_tokens=lowerCamelCase_, return_special_tokens_mask=lowerCamelCase_, )
lowerCamelCase__ : str = encoded_sequence_dict['input_ids']
lowerCamelCase__ : Any = encoded_sequence_dict['special_tokens_mask']
self.assertEqual(len(lowerCamelCase_ ), len(lowerCamelCase_ ) )
lowerCamelCase__ : Any = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(lowerCamelCase_ )
]
lowerCamelCase__ : List[str] = [x for x in filtered_sequence if x is not None]
self.assertEqual(lowerCamelCase_, lowerCamelCase_ )
@require_tokenizers
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = AutoTokenizer.from_pretrained('facebook/opt-350m', from_slow=lowerCamelCase_ )
lowerCamelCase__ : Tuple = 'A photo of a cat'
lowerCamelCase__ : Tuple = tokenizer.encode(
lowerCamelCase_, )
self.assertEqual(lowerCamelCase_, [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
tokenizer.save_pretrained('test_opt' )
lowerCamelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained('./test_opt' )
lowerCamelCase__ : Any = tokenizer.encode(
lowerCamelCase_, )
self.assertEqual(lowerCamelCase_, [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = AutoTokenizer.from_pretrained('facebook/opt-350m', use_slow=lowerCamelCase_ )
lowerCamelCase__ : int = 'A photo of a cat'
lowerCamelCase__ : Optional[Any] = tokenizer.encode(
lowerCamelCase_, )
# Same as above
self.assertEqual(lowerCamelCase_, [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
@unittest.skip('This test is failing because of a bug in the fast tokenizer' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = AutoTokenizer.from_pretrained('facebook/opt-350m', from_slow=lowerCamelCase_ )
lowerCamelCase__ : Dict = 'bos'
lowerCamelCase__ : List[Any] = tokenizer.get_vocab()['bos']
lowerCamelCase__ : List[Any] = 'A photo of a cat'
lowerCamelCase__ : Dict = tokenizer.encode(
lowerCamelCase_, )
# We changed the bos token
self.assertEqual(lowerCamelCase_, [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
tokenizer.save_pretrained('./tok' )
lowerCamelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained('./tok' )
self.assertTrue(tokenizer.is_fast )
lowerCamelCase__ : Dict = tokenizer.encode(
lowerCamelCase_, )
self.assertEqual(lowerCamelCase_, [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
| 353
|
"""simple docstring"""
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = len(lowerCamelCase_ )
lowerCamelCase__ : Any = [0] * len_array
if len_array > 0:
lowerCamelCase__ : Union[str, Any] = array[0]
for i in range(1, lowerCamelCase_ ):
lowerCamelCase__ : Optional[int] = self.prefix_sum[i - 1] + array[i]
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(lowerCamelCase_ )
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
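# Minimal usage sketch (comment only; the obfuscation above hides the original
# names, assumed to be PrefixSum, get_sum and contains_sum):
# ps = PrefixSum([1, 2, 3, 4])   # prefix sums: [1, 3, 6, 10]
# ps.get_sum(1, 3)               # -> 9, i.e. 2 + 3 + 4
# ps.contains_sum(5)             # -> True: the subarray [2, 3] sums to 5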
| 316
| 0
|
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : List[Any] = len(_lowerCamelCase )
lowerCamelCase__ : List[Any] = sum(_lowerCamelCase )
lowerCamelCase__ : int = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
lowerCamelCase__ : str = True
for i in range(1 , s + 1 ):
lowerCamelCase__ : Any = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
lowerCamelCase__ : Union[str, Any] = dp[i][j - 1]
if arr[i - 1] <= j:
lowerCamelCase__ : Dict = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
lowerCamelCase__ : List[Any] = s - 2 * j
break
return diff
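# Usage sketch for the minimum-partition routine above (comment only, since the
# obfuscation drops the input name `arr`): on [1, 6, 11, 5] it returns 1,
# because {1, 5, 6} sums to 12 and {11} sums to 11.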
| 354
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['image_processor', 'tokenizer']
lowerCamelCase__ : Optional[int] = 'CLIPImageProcessor'
lowerCamelCase__ : List[str] = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
def __init__(self, lowerCamelCase_=None, lowerCamelCase_=None, **lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.', lowerCamelCase_, )
lowerCamelCase__ : int = kwargs.pop('feature_extractor' )
lowerCamelCase__ : str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(lowerCamelCase_, lowerCamelCase_ )
def __call__(self, lowerCamelCase_=None, lowerCamelCase_=None, lowerCamelCase_=None, **lowerCamelCase_ ):
'''simple docstring'''
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
lowerCamelCase__ : Any = self.tokenizer(lowerCamelCase_, return_tensors=lowerCamelCase_, **lowerCamelCase_ )
if images is not None:
lowerCamelCase__ : List[Any] = self.image_processor(lowerCamelCase_, return_tensors=lowerCamelCase_, **lowerCamelCase_ )
if text is not None and images is not None:
lowerCamelCase__ : str = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCamelCase_ ), tensor_type=lowerCamelCase_ )
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase_, **lowerCamelCase_ )
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase_, **lowerCamelCase_ )
@property
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = self.tokenizer.model_input_names
lowerCamelCase__ : List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
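# Hypothetical usage sketch (this processor pairs a CLIP image processor with an
# XLM-Roberta tokenizer; the names below are assumptions, not from this file):
# processor = AltCLIPProcessor(image_processor=image_processor, tokenizer=tokenizer)
# inputs = processor(text=['a photo of a cat'], images=image, return_tensors='pt')
# list(inputs)  # ['input_ids', 'attention_mask', 'pixel_values']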
| 316
| 0
|
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
while b:
lowerCamelCase__ , lowerCamelCase__ : Dict = b, a % b  # i.e. a, b = b, a % b (iterative Euclid)
return a
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
return a if b == 0 else euclidean_gcd_recursive(_lowerCamelCase , a % b )
def lowerCamelCase_ ( ):
print(f'''euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}''' )
print(f'''euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}''' )
print(f'''euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}''' )
print(f'''euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}''' )
print(f'''euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}''' )
print(f'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}''' )
print(f'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}''' )
print(f'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}''' )
print(f'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}''' )
print(f'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}''' )
if __name__ == "__main__":
main()
| 355
|
"""simple docstring"""
import cva
import numpy as np
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
if k in (0.04, 0.06):
lowerCamelCase__ : Tuple = k
lowerCamelCase__ : Optional[Any] = window_size
else:
raise ValueError('invalid k value' )
def __str__(self ):
'''simple docstring'''
return str(self.k )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = cva.imread(lowerCamelCase_, 0 )
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = img.shape
lowerCamelCase__ : list[list[int]] = []
lowerCamelCase__ : Optional[Any] = img.copy()
lowerCamelCase__ : Optional[Any] = cva.cvtColor(lowerCamelCase_, cva.COLOR_GRAY2RGB )
lowerCamelCase__ , lowerCamelCase__ : Any = np.gradient(lowerCamelCase_ )
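# Products of the image gradients below are the entries Ixx, Iyy, Ixy of the
# structure tensor, summed over each window in the loops that follow.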
lowerCamelCase__ : Optional[Any] = dx**2
lowerCamelCase__ : List[Any] = dy**2
lowerCamelCase__ : List[str] = dx * dy
lowerCamelCase__ : Tuple = 0.04
lowerCamelCase__ : List[Any] = self.window_size // 2
for y in range(lowerCamelCase_, h - offset ):
for x in range(lowerCamelCase_, w - offset ):
lowerCamelCase__ : Union[str, Any] = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase__ : Optional[Any] = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase__ : List[Any] = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase__ : str = (wxx * wyy) - (wxy**2)
lowerCamelCase__ : Dict = wxx + wyy
lowerCamelCase__ : Union[str, Any] = det - k * (trace**2)
# Corner-response threshold; can be tuned
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0), 0 )
color_img.itemset((y, x, 1), 0 )
color_img.itemset((y, x, 2), 2_5_5 )
return color_img, corner_list
if __name__ == "__main__":
A_ : Optional[Any] = HarrisCorner(0.04, 3)
A_, A_ : List[Any] = edge_detect.detect("path_to_image")
cva.imwrite("detect.png", color_img)
| 316
| 0
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : UNetaDModel
lowerCamelCase__ : ScoreSdeVeScheduler
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowerCamelCase_, scheduler=lowerCamelCase_ )
@torch.no_grad()
def __call__(self, lowerCamelCase_ = 1, lowerCamelCase_ = 2_0_0_0, lowerCamelCase_ = None, lowerCamelCase_ = "pil", lowerCamelCase_ = True, **lowerCamelCase_, ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.unet.config.sample_size
lowerCamelCase__ : Dict = (batch_size, 3, img_size, img_size)
lowerCamelCase__ : List[Any] = self.unet
lowerCamelCase__ : Optional[Any] = randn_tensor(lowerCamelCase_, generator=lowerCamelCase_ ) * self.scheduler.init_noise_sigma
lowerCamelCase__ : Union[str, Any] = sample.to(self.device )
self.scheduler.set_timesteps(lowerCamelCase_ )
self.scheduler.set_sigmas(lowerCamelCase_ )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
lowerCamelCase__ : int = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
lowerCamelCase__ : Optional[Any] = self.unet(lowerCamelCase_, lowerCamelCase_ ).sample
lowerCamelCase__ : List[str] = self.scheduler.step_correct(lowerCamelCase_, lowerCamelCase_, generator=lowerCamelCase_ ).prev_sample
# prediction step
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, lowerCamelCase_ ).sample
lowerCamelCase__ : Any = self.scheduler.step_pred(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, generator=lowerCamelCase_ )
lowerCamelCase__ , lowerCamelCase__ : Any = output.prev_sample, output.prev_sample_mean
lowerCamelCase__ : Union[str, Any] = sample_mean.clamp(0, 1 )
lowerCamelCase__ : Optional[Any] = sample.cpu().permute(0, 2, 3, 1 ).numpy()
if output_type == "pil":
lowerCamelCase__ : Optional[Any] = self.numpy_to_pil(lowerCamelCase_ )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=lowerCamelCase_ )
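# Hypothetical usage sketch (keyword names follow the original ScoreSdeVePipeline
# signature, which the obfuscation hides above):
# pipe = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
# images = pipe(batch_size=1, num_inference_steps=2000, output_type='pil').images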
| 356
|
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
A_ : str = TypeVar("KEY")
A_ : List[Any] = TypeVar("VAL")
@dataclass(frozen=snake_case_ , slots=snake_case_ )
class a_ ( Generic[KEY, VAL] ):
'''simple docstring'''
lowerCamelCase__ : KEY
lowerCamelCase__ : VAL
class a_ ( _Item ):
'''simple docstring'''
def __init__(self ):
'''simple docstring'''
super().__init__(lowerCamelCase_, lowerCamelCase_ )
def __bool__(self ):
'''simple docstring'''
return False
A_ : List[Any] = _DeletedItem()
class a_ ( MutableMapping[KEY, VAL] ):
'''simple docstring'''
def __init__(self, lowerCamelCase_ = 8, lowerCamelCase_ = 0.75 ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = initial_block_size
lowerCamelCase__ : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
lowerCamelCase__ : List[Any] = capacity_factor
lowerCamelCase__ : Optional[int] = 0
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return hash(lowerCamelCase_ ) % len(self._buckets )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return (ind + 1) % len(self._buckets )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self._buckets[ind]
if not stored:
lowerCamelCase__ : Tuple = _Item(lowerCamelCase_, lowerCamelCase_ )
self._len += 1
return True
elif stored.key == key:
lowerCamelCase__ : Optional[int] = _Item(lowerCamelCase_, lowerCamelCase_ )
return True
else:
return False
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
if len(self._buckets ) <= self._initial_block_size:
return False
lowerCamelCase__ : Any = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = self._buckets
lowerCamelCase__ : Dict = [None] * new_size
lowerCamelCase__ : Tuple = 0
for item in old_buckets:
if item:
self._add_item(item.key, item.val )
def a__ (self ):
'''simple docstring'''
self._resize(len(self._buckets ) * 2 )
def a__ (self ):
'''simple docstring'''
self._resize(len(self._buckets ) // 2 )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self._get_bucket_index(lowerCamelCase_ )
for _ in range(len(self._buckets ) ):
yield ind
lowerCamelCase__ : Tuple = self._get_next_ind(lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
for ind in self._iterate_buckets(lowerCamelCase_ ):
if self._try_set(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
break
def __setitem__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
if self._is_full():
self._size_up()
self._add_item(lowerCamelCase_, lowerCamelCase_ )
def __delitem__(self, lowerCamelCase_ ):
'''simple docstring'''
for ind in self._iterate_buckets(lowerCamelCase_ ):
lowerCamelCase__ : List[str] = self._buckets[ind]
if item is None:
raise KeyError(lowerCamelCase_ )
if item is _deleted:
continue
if item.key == key:
lowerCamelCase__ : Optional[int] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__(self, lowerCamelCase_ ):
'''simple docstring'''
for ind in self._iterate_buckets(lowerCamelCase_ ):
lowerCamelCase__ : List[Any] = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(lowerCamelCase_ )
def __len__(self ):
'''simple docstring'''
return self._len
def __iter__(self ):
'''simple docstring'''
yield from (item.key for item in self._buckets if item)
def __repr__(self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = ', '.join(
f'''{item.key}: {item.val}''' for item in self._buckets if item )
return f'''HashMap({val_string})'''
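# Minimal usage sketch (comment only; the class above is an open-addressing hash
# map with linear probing and tombstone deletion, original name assumed HashMap):
# hm = HashMap()
# hm['a'] = 1      # __setitem__ doubles the table once the 0.75 load factor is hit
# del hm['a']      # __delitem__ writes the _deleted sentinel rather than None
# 'a' in hm        # MutableMapping derives __contains__ from __getitem__ -> False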
| 316
| 0
|
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
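# Right-splits the string on the old substring at most `occurrence` times and
# rejoins with the new one, i.e. it replaces only the last occurrences
# (the parameters of the original rreplace helper are obfuscated above).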
lowerCamelCase__ : Any = s.rsplit(_lowerCamelCase , _lowerCamelCase )
return new.join(_lowerCamelCase )
def lowerCamelCase_ ( _lowerCamelCase ):
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() )
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : List[Any] = {}
lowerCamelCase__ : Any = ['group_1', 'group_2', 'group_3', 'group_4']
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
lowerCamelCase__ : Union[str, Any] = key.replace(f'''{group_key}.''' , f'''{group_key}.group.''' )
if "res_path" in key:
lowerCamelCase__ : Dict = key.replace('res_path.' , 'res_path.path.' )
if key.endswith('.w' ):
lowerCamelCase__ : str = rreplace(_lowerCamelCase , '.w' , '.weight' , 1 )
if key.endswith('.b' ):
lowerCamelCase__ : Optional[Any] = rreplace(_lowerCamelCase , '.b' , '.bias' , 1 )
lowerCamelCase__ : int = value.float()
return upgrade
@torch.no_grad()
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=True ):
from dall_e import Encoder
lowerCamelCase__ : List[str] = Encoder()
if os.path.exists(_lowerCamelCase ):
lowerCamelCase__ : Optional[int] = torch.load(_lowerCamelCase )
else:
lowerCamelCase__ : List[Any] = torch.hub.load_state_dict_from_url(_lowerCamelCase )
if isinstance(_lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : List[Any] = ckpt.state_dict()
encoder.load_state_dict(_lowerCamelCase )
if config_path is not None:
lowerCamelCase__ : Union[str, Any] = FlavaImageCodebookConfig.from_pretrained(_lowerCamelCase )
else:
lowerCamelCase__ : Dict = FlavaImageCodebookConfig()
lowerCamelCase__ : Tuple = FlavaImageCodebook(_lowerCamelCase ).eval()
lowerCamelCase__ : List[str] = encoder.state_dict()
lowerCamelCase__ : Any = upgrade_state_dict(_lowerCamelCase )
hf_model.load_state_dict(_lowerCamelCase )
lowerCamelCase__ : Optional[Any] = hf_model.state_dict()
lowerCamelCase__ : Optional[int] = count_parameters(_lowerCamelCase )
lowerCamelCase__ : Optional[int] = count_parameters(_lowerCamelCase )
assert torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-3 )
if save_checkpoint:
hf_model.save_pretrained(_lowerCamelCase )
else:
return hf_state_dict
if __name__ == "__main__":
A_ : Tuple = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
A_ : str = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 357
|
"""simple docstring"""
def lowerCamelCase_ ( ):
lowerCamelCase__ : Optional[Any] = []
lowerCamelCase__ : List[Any] = 1
while len(_lowerCamelCase ) < 1e6:
constant.append(str(_lowerCamelCase ) )
i += 1
lowerCamelCase__ : str = ''.join(_lowerCamelCase )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[9_9999] )
* int(constant[99_9999] )
)
if __name__ == "__main__":
print(solution())
| 358
|
"""simple docstring"""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
A_ : Union[str, Any] = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
A_ : int = concatenate_datasets
A_ : Any = DownloadConfig
A_ : List[Any] = DownloadManager
A_ : Optional[Any] = DownloadMode
A_ : List[str] = DownloadConfig
A_ : Optional[int] = DownloadMode
A_ : Dict = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 316
| 0
|
"""simple docstring"""
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
A_ : Tuple = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : List[str] = PegasusTokenizer
lowerCamelCase__ : Dict = PegasusTokenizerFast
lowerCamelCase__ : Tuple = True
lowerCamelCase__ : Optional[Any] = True
def a__ (self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase__ : Union[str, Any] = PegasusTokenizer(lowerCamelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ (self ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained('google/pegasus-large' )
def a__ (self, **lowerCamelCase_ ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained(self.tmpdirname, **lowerCamelCase_ )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return ("This is a test", "This is a test")
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = '</s>'
lowerCamelCase__ : Optional[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase_ ), lowerCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase_ ), lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0], '<pad>' )
self.assertEqual(vocab_keys[1], '</s>' )
self.assertEqual(vocab_keys[-1], 'v' )
self.assertEqual(len(lowerCamelCase_ ), 1_1_0_3 )
def a__ (self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size, 1_1_0_3 )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCamelCase__ : Union[str, Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCamelCase__ : Optional[Any] = (
'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
' </s> <pad> <pad> <pad>'
)
lowerCamelCase__ : Any = rust_tokenizer([raw_input_str], return_tensors=lowerCamelCase_, add_special_tokens=lowerCamelCase_ ).input_ids[0]
lowerCamelCase__ : Any = py_tokenizer([raw_input_str], return_tensors=lowerCamelCase_, add_special_tokens=lowerCamelCase_ ).input_ids[0]
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
lowerCamelCase__ : Optional[Any] = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
lowerCamelCase__ : Any = [2, 4_1_3, 6_1_5, 1_1_4, 3, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
lowerCamelCase__ : Union[str, Any] = tokenizer([raw_input_str], return_tensors=lowerCamelCase_ ).input_ids[0]
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6_1_0_3
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 1_0_3
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_0_5
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1_0_2_4
lowerCamelCase__ : List[str] = 'To ensure a smooth flow of bank resolutions.'
lowerCamelCase__ : int = [4_1_3, 6_1_5, 1_1_4, 2_2_9_1, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
lowerCamelCase__ : List[Any] = tokenizer([raw_input_str], return_tensors=lowerCamelCase_ ).input_ids[0]
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['This is going to be way too long.' * 1_5_0, 'short example']
lowerCamelCase__ : Optional[Any] = ['not super long but more than 5 tokens', 'tiny']
lowerCamelCase__ : Optional[int] = self._large_tokenizer(lowerCamelCase_, padding=lowerCamelCase_, truncation=lowerCamelCase_, return_tensors='pt' )
lowerCamelCase__ : Any = self._large_tokenizer(
text_target=lowerCamelCase_, max_length=5, padding=lowerCamelCase_, truncation=lowerCamelCase_, return_tensors='pt' )
assert batch.input_ids.shape == (2, 1_0_2_4)
assert batch.attention_mask.shape == (2, 1_0_2_4)
assert targets["input_ids"].shape == (2, 5)
assert len(lowerCamelCase_ ) == 2 # input_ids, attention_mask.
@slow
def a__ (self ):
'''simple docstring'''
# fmt: off
lowerCamelCase__ : List[str] = {'input_ids': [[3_8_9_7_9, 1_4_3, 1_8_4_8_5, 6_0_6, 1_3_0, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 5_4_1_8_9, 1_1_2_9, 1_1_1, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 9_1_1_4, 1_4_7_8_7, 1_2_1, 1_3_2_4_9, 1_5_8, 5_9_2, 9_5_6, 1_2_1, 1_4_6_2_1, 3_1_5_7_6, 1_4_3, 6_2_6_1_3, 1_0_8, 9_6_8_8, 9_3_0, 4_3_4_3_0, 1_1_5_6_2, 6_2_6_1_3, 3_0_4, 1_0_8, 1_1_4_4_3, 8_9_7, 1_0_8, 9_3_1_4, 1_7_4_1_5, 6_3_3_9_9, 1_0_8, 1_1_4_4_3, 7_6_1_4, 1_8_3_1_6, 1_1_8, 4_2_8_4, 7_1_4_8, 1_2_4_3_0, 1_4_3, 1_4_0_0, 2_5_7_0_3, 1_5_8, 1_1_1, 4_2_8_4, 7_1_4_8, 1_1_7_7_2, 1_4_3, 2_1_2_9_7, 1_0_6_4, 1_5_8, 1_2_2, 2_0_4, 3_5_0_6, 1_7_5_4, 1_1_3_3, 1_4_7_8_7, 1_5_8_1, 1_1_5, 3_3_2_2_4, 4_4_8_2, 1_1_1, 1_3_5_5, 1_1_0, 2_9_1_7_3, 3_1_7, 5_0_8_3_3, 1_0_8, 2_0_1_4_7, 9_4_6_6_5, 1_1_1, 7_7_1_9_8, 1_0_7, 1], [1_1_0, 6_2_6_1_3, 1_1_7, 6_3_8, 1_1_2, 1_1_3_3, 1_2_1, 2_0_0_9_8, 1_3_5_5, 7_9_0_5_0, 1_3_8_7_2, 1_3_5, 1_5_9_6, 5_3_5_4_1, 1_3_5_2, 1_4_1, 1_3_0_3_9, 5_5_4_2, 1_2_4, 3_0_2, 5_1_8, 1_1_1, 2_6_8, 2_9_5_6, 1_1_5, 1_4_9, 4_4_2_7, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_3_9, 1_2_3_5, 2_7_9_9, 1_8_2_8_9, 1_7_7_8_0, 2_0_4, 1_0_9, 9_4_7_4, 1_2_9_6, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase_, model_name='google/bigbird-pegasus-large-arxiv', revision='ba85d0851d708441f91440d509690f1ab6353415', )
@require_sentencepiece
@require_tokenizers
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : List[str] = PegasusTokenizer
lowerCamelCase__ : List[str] = PegasusTokenizerFast
lowerCamelCase__ : Dict = True
lowerCamelCase__ : str = True
def a__ (self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase__ : Optional[Any] = PegasusTokenizer(lowerCamelCase_, offset=0, mask_token_sent=lowerCamelCase_, mask_token='[MASK]' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ (self ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv' )
def a__ (self, **lowerCamelCase_ ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained(self.tmpdirname, **lowerCamelCase_ )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return ("This is a test", "This is a test")
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCamelCase__ : str = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCamelCase__ : Optional[int] = (
'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
' <pad> <pad> <pad>'
)
lowerCamelCase__ : List[str] = rust_tokenizer([raw_input_str], return_tensors=lowerCamelCase_, add_special_tokens=lowerCamelCase_ ).input_ids[0]
lowerCamelCase__ : Optional[int] = py_tokenizer([raw_input_str], return_tensors=lowerCamelCase_, add_special_tokens=lowerCamelCase_ ).input_ids[0]
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
@require_torch
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = ['This is going to be way too long.' * 1_0_0_0, 'short example']
lowerCamelCase__ : Tuple = ['not super long but more than 5 tokens', 'tiny']
lowerCamelCase__ : Optional[Any] = self._large_tokenizer(lowerCamelCase_, padding=lowerCamelCase_, truncation=lowerCamelCase_, return_tensors='pt' )
lowerCamelCase__ : Any = self._large_tokenizer(
text_target=lowerCamelCase_, max_length=5, padding=lowerCamelCase_, truncation=lowerCamelCase_, return_tensors='pt' )
assert batch.input_ids.shape == (2, 4_0_9_6)
assert batch.attention_mask.shape == (2, 4_0_9_6)
assert targets["input_ids"].shape == (2, 5)
assert len(lowerCamelCase_ ) == 2 # input_ids, attention_mask.
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = (
'This is an example string that is used to test the original TF implementation against the HF'
' implementation'
)
lowerCamelCase__ : List[Any] = self._large_tokenizer(lowerCamelCase_ ).input_ids
self.assertListEqual(
lowerCamelCase_, [1_8_2, 1_1_7, 1_4_2, 5_8_7, 4_2_1_1, 1_2_0, 1_1_7, 2_6_3, 1_1_2, 8_0_4, 1_0_9, 8_5_6, 2_5_0_1_6, 3_1_3_7, 4_6_4, 1_0_9, 2_6_9_5_5, 3_1_3_7, 1], )
| 359
|
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
A_ : str = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : bool = field(default=snake_case_ , metadata={'help': 'Whether to use SortishSampler or not.'} )
lowerCamelCase__ : bool = field(
default=snake_case_ , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
lowerCamelCase__ : Optional[int] = field(
default=snake_case_ , metadata={
'help': (
'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `max_length` value of the model configuration.'
)
} , )
lowerCamelCase__ : Optional[int] = field(
default=snake_case_ , metadata={
'help': (
'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `num_beams` value of the model configuration.'
)
} , )
lowerCamelCase__ : Optional[Union[str, Path, GenerationConfig]] = field(
default=snake_case_ , metadata={
'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
} , )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = super().to_dict()
for k, v in d.items():
if isinstance(lowerCamelCase_, lowerCamelCase_ ):
lowerCamelCase__ : Any = v.to_dict()
return d
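# Hypothetical usage sketch (field names follow the original
# Seq2SeqTrainingArguments, which the obfuscation hides above):
# args = Seq2SeqTrainingArguments(
#     output_dir='out', predict_with_generate=True, generation_max_length=128
# )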
| 316
| 0
|
import string
from math import logaa
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : List[Any] = document.translate(
str.maketrans('' , '' , string.punctuation ) ).replace('\n' , '' )
lowerCamelCase__ : str = document_without_punctuation.split(' ' ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Optional[int] = corpus.lower().translate(
str.maketrans('' , '' , string.punctuation ) ) # strip all punctuation and replace it with ''
lowerCamelCase__ : int = corpus_without_punctuation.split('\n' )
lowerCamelCase__ : int = term.lower()
return (len([doc for doc in docs if term in doc] ), len(_lowerCamelCase ))
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ):
if smoothing:
if n == 0:
raise ValueError('log10(0) is undefined.' )
return round(1 + logaa(n / (1 + df) ) , 3 )
if df == 0:
raise ZeroDivisionError('df must be > 0' )
elif n == 0:
raise ValueError('log10(0) is undefined.' )
return round(logaa(n / df ) , 3 )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
return round(tf * idf , 3 )
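# Worked example (comment only; the original names term_frequency,
# inverse_document_frequency and tf_idf are assumed):
# term_frequency('to', 'To be, or not to be')  # -> 2
# inverse_document_frequency(3, 10)            # -> 0.523 == round(log10(10 / 3), 3)
# tf_idf(2, 0.523)                             # -> 1.046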
| 360
|
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = []
lowerCamelCase__ : List[str] = []
lowerCamelCase__ : Tuple = {
'^': 3,
'*': 2,
'/': 2,
'%': 2,
'+': 1,
'-': 1,
} # Priority of each operator
lowerCamelCase__ : List[str] = len(_lowerCamelCase ) if (len(_lowerCamelCase ) > 7) else 7
# Print table header for output
print(
'Symbol'.center(8 ) , 'Stack'.center(_lowerCamelCase ) , 'Postfix'.center(_lowerCamelCase ) , sep=' | ' , )
print('-' * (print_width * 3 + 7) )
for x in infix:
if x.isalpha() or x.isdigit():
post_fix.append(_lowerCamelCase ) # if x is Alphabet / Digit, add it to Postfix
elif x == "(":
stack.append(_lowerCamelCase ) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
stack.pop()
else:
if len(_lowerCamelCase ) == 0:
stack.append(_lowerCamelCase ) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
while len(_lowerCamelCase ) > 0 and priority[x] <= priority[stack[-1]]:
post_fix.append(stack.pop() ) # pop stack & add to Postfix
stack.append(_lowerCamelCase ) # push x to stack
print(
x.center(8 ) , (''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , (''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , sep=' | ' , ) # Output in tabular format
while len(_lowerCamelCase ) > 0: # while stack is not empty
post_fix.append(stack.pop() ) # pop stack & add to Postfix
print(
' '.center(8 ) , (''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , (''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , sep=' | ' , ) # Output in tabular format
return "".join(_lowerCamelCase ) # return Postfix as str
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = list(infix[::-1] ) # reverse the infix equation
for i in range(len(_lowerCamelCase ) ):
if infix[i] == "(":
lowerCamelCase__ : List[Any] = ')' # change "(" to ")"
elif infix[i] == ")":
lowerCamelCase__ : Tuple = '(' # change ")" to "("
return (infix_2_postfix(''.join(_lowerCamelCase ) ))[
::-1
] # call infix_2_postfix on Infix, return reverse of Postfix
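# Example (comment only): infix_2_postfix('a+b*c') prints its trace table and
# returns 'abc*+'; the prefix conversion of 'a+b*c' is '+a*bc'.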
if __name__ == "__main__":
A_ : Tuple = input("\nEnter an Infix Equation = ") # Input an Infix equation
A_ : List[str] = "".join(Infix.split()) # Remove spaces from the input
print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
| 316
| 0
|
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Dict = np.inf
def set_batch_size(_lowerCamelCase ) -> None:
nonlocal batch_size
if isinstance(_lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : List[str] = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
elif isinstance(_lowerCamelCase , _lowerCamelCase ) and feature.dtype == "binary":
lowerCamelCase__ : str = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
_visit(_lowerCamelCase , _lowerCamelCase )
return None if batch_size is np.inf else batch_size
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = False, lowerCamelCase_ = False, lowerCamelCase_ = None, **lowerCamelCase_, ):
'''simple docstring'''
super().__init__(
lowerCamelCase_, split=lowerCamelCase_, features=lowerCamelCase_, cache_dir=lowerCamelCase_, keep_in_memory=lowerCamelCase_, streaming=lowerCamelCase_, num_proc=lowerCamelCase_, **lowerCamelCase_, )
lowerCamelCase__ : str = path_or_paths if isinstance(lowerCamelCase_, lowerCamelCase_ ) else {self.split: path_or_paths}
lowerCamelCase__ : Optional[Any] = _PACKAGED_DATASETS_MODULES['parquet'][1]
lowerCamelCase__ : List[Any] = Parquet(
cache_dir=lowerCamelCase_, data_files=lowerCamelCase_, features=lowerCamelCase_, hash=lowerCamelCase_, **lowerCamelCase_, )
def a__ (self ):
'''simple docstring'''
if self.streaming:
lowerCamelCase__ : Any = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
lowerCamelCase__ : str = None
lowerCamelCase__ : str = None
lowerCamelCase__ : Any = None
lowerCamelCase__ : Union[str, Any] = None
self.builder.download_and_prepare(
download_config=lowerCamelCase_, download_mode=lowerCamelCase_, verification_mode=lowerCamelCase_, base_path=lowerCamelCase_, num_proc=self.num_proc, )
lowerCamelCase__ : List[str] = self.builder.as_dataset(
split=self.split, verification_mode=lowerCamelCase_, in_memory=self.keep_in_memory )
return dataset
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ = None, **lowerCamelCase_, ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = dataset
lowerCamelCase__ : List[Any] = path_or_buf
lowerCamelCase__ : Tuple = batch_size or get_writer_batch_size(dataset.features )
lowerCamelCase__ : str = parquet_writer_kwargs
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf, (str, bytes, os.PathLike) ):
with open(self.path_or_buf, 'wb+' ) as buffer:
lowerCamelCase__ : List[str] = self._write(file_obj=lowerCamelCase_, batch_size=lowerCamelCase_, **self.parquet_writer_kwargs )
else:
lowerCamelCase__ : int = self._write(file_obj=self.path_or_buf, batch_size=lowerCamelCase_, **self.parquet_writer_kwargs )
return written
def a__ (self, lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = 0
lowerCamelCase__ : List[str] = parquet_writer_kwargs.pop('path_or_buf', lowerCamelCase_ )
lowerCamelCase__ : str = self.dataset.features.arrow_schema
lowerCamelCase__ : str = pq.ParquetWriter(lowerCamelCase_, schema=lowerCamelCase_, **lowerCamelCase_ )
for offset in logging.tqdm(
range(0, len(self.dataset ), lowerCamelCase_ ), unit='ba', disable=not logging.is_progress_bar_enabled(), desc='Creating parquet from Arrow format', ):
lowerCamelCase__ : List[Any] = query_table(
table=self.dataset._data, key=slice(lowerCamelCase_, offset + batch_size ), indices=self.dataset._indices if self.dataset._indices is not None else None, )
writer.write_table(lowerCamelCase_ )
written += batch.nbytes
writer.close()
return written
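# Hypothetical usage sketch (in the original these classes are
# ParquetDatasetReader and ParquetDatasetWriter):
# ds = ParquetDatasetReader('data.parquet', split='train').read()
# ParquetDatasetWriter(ds, 'out.parquet', batch_size=1000).write()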
| 361
|
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
A_ : Any = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 1_28,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class a_ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def a__ (cls ):
'''simple docstring'''
lowerCamelCase__ : Tuple = TOKEN
HfFolder.save_token(lowerCamelCase_ )
@classmethod
def a__ (cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token, repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='test-dynamic-config' )
except HTTPError:
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('test-config', use_auth_token=self._token )
lowerCamelCase__ : List[str] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase_, repo_id='test-config', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : List[Any] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('valid_org/test-config-org', use_auth_token=self._token )
lowerCamelCase__ : int = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCamelCase_, repo_id='valid_org/test-config-org', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : Tuple = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
CustomConfig.register_for_auto_class()
lowerCamelCase__ : str = CustomConfig(attribute=4_2 )
config.push_to_hub('test-dynamic-config', use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map, {'AutoConfig': 'custom_configuration.CustomConfig'} )
lowerCamelCase__ : List[str] = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''', trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__, 'CustomConfig' )
self.assertEqual(new_config.attribute, 4_2 )
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
lowerCamelCase__ : Union[str, Any] = c.n_embd + 1 # int
lowerCamelCase__ : Optional[Any] = c.resid_pdrop + 1.0 # float
lowerCamelCase__ : str = not c.scale_attn_weights # bool
lowerCamelCase__ : Any = c.summary_type + 'foo' # str
c.update_from_string(
f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(lowerCamelCase_, c.n_embd, 'mismatch for key: n_embd' )
self.assertEqual(lowerCamelCase_, c.resid_pdrop, 'mismatch for key: resid_pdrop' )
self.assertEqual(lowerCamelCase_, c.scale_attn_weights, 'mismatch for key: scale_attn_weights' )
self.assertEqual(lowerCamelCase_, c.summary_type, 'mismatch for key: summary_type' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = PretrainedConfig()
lowerCamelCase__ : Union[str, Any] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
lowerCamelCase_, ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
lowerCamelCase__ : str = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCamelCase_, lowerCamelCase_ )]
if len(lowerCamelCase_ ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
f''' {', '.join(lowerCamelCase_ )}.''' )
def a__ (self ):
'''simple docstring'''
with self.assertRaises(lowerCamelCase_ ):
# config is in subfolder, the following should not work without specifying the subfolder
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
lowerCamelCase__ : Any = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder', subfolder='bert' )
self.assertIsNotNone(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = mock.Mock()
lowerCamelCase__ : str = 5_0_0
lowerCamelCase__ : Union[str, Any] = {}
lowerCamelCase__ : Any = HTTPError
lowerCamelCase__ : str = {}
# Download this model to make sure it's in the cache.
lowerCamelCase__ : Dict = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request', return_value=lowerCamelCase_ ) as mock_head:
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This checks that we did call the fake head request
mock_head.assert_called()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = AutoConfig.from_pretrained('bert-base-cased' )
lowerCamelCase__ : Optional[Any] = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Tuple = 2
json.dump(configuration.to_dict(), open(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
lowerCamelCase__ : List[str] = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
lowerCamelCase__ : Optional[Any] = ['config.42.0.0.json']
lowerCamelCase__ : List[Any] = 7_6_8
configuration.save_pretrained(lowerCamelCase_ )
shutil.move(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), os.path.join(lowerCamelCase_, 'config.42.0.0.json' ) )
lowerCamelCase__ : str = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 7_6_8 )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
lowerCamelCase__ : Dict = 'v4.0.0'
lowerCamelCase__ , lowerCamelCase__ : str = new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCamelCase_, return_unused_kwargs=lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
# This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(lowerCamelCase_, {} )
# Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
lowerCamelCase__ : Optional[Any] = 'v3.0.0'
lowerCamelCase__ : Optional[int] = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(old_configuration.hidden_size, 7_6_8 )
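# A self-contained sketch (hedged: the helper name and logic are illustrative, not the
# real transformers internals) of the version-gated selection the tests above exercise:
# among files named "config.<version>.json", pick the newest one whose version does not
# exceed the running library version.
from packaging import version as _version
def pick_config_file(candidates, current='4.1.0', default='config.json'):
    best, best_v = default, None
    for name in candidates:
        v = _version.parse(name.removeprefix('config.').removesuffix('.json'))
        if v <= _version.parse(current) and (best_v is None or v > best_v):
            best, best_v = name, v
    return best
# pick_config_file(['config.4.0.0.json', 'config.42.0.0.json']) -> 'config.4.0.0.json'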
| 316
| 0
|
"""simple docstring"""
from __future__ import annotations
import math
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if len(_lowerCamelCase ) != 2 or len(a[0] ) != 2 or len(_lowerCamelCase ) != 2 or len(b[0] ) != 2:
raise Exception('Matrices are not 2x2' )
lowerCamelCase__ : Optional[int] = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
return [
[matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(_lowerCamelCase ) )
]
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
return [
[matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(_lowerCamelCase ) )
]
def lowerCamelCase_ ( _lowerCamelCase ):
if len(_lowerCamelCase ) % 2 != 0 or len(a[0] ) % 2 != 0:
raise Exception('Odd matrices are not supported!' )
lowerCamelCase__ : Union[str, Any] = len(_lowerCamelCase )
lowerCamelCase__ : Any = matrix_length // 2
lowerCamelCase__ : Tuple = [[a[i][j] for j in range(_lowerCamelCase , _lowerCamelCase )] for i in range(_lowerCamelCase )]
lowerCamelCase__ : str = [
[a[i][j] for j in range(_lowerCamelCase , _lowerCamelCase )] for i in range(_lowerCamelCase , _lowerCamelCase )
]
lowerCamelCase__ : Dict = [[a[i][j] for j in range(_lowerCamelCase )] for i in range(_lowerCamelCase )]
lowerCamelCase__ : List[str] = [[a[i][j] for j in range(_lowerCamelCase )] for i in range(_lowerCamelCase , _lowerCamelCase )]
return top_left, top_right, bot_left, bot_right
def lowerCamelCase_ ( _lowerCamelCase ):
return len(_lowerCamelCase ), len(matrix[0] )
def lowerCamelCase_ ( _lowerCamelCase ):
print('\n'.join(str(line ) for line in matrix ) )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if matrix_dimensions(_lowerCamelCase ) == (2, 2):
return default_matrix_multiplication(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : Tuple = split_matrix(_lowerCamelCase )
lowerCamelCase__ : int = split_matrix(_lowerCamelCase )
lowerCamelCase__ : Dict = actual_strassen(_lowerCamelCase , matrix_subtraction(_lowerCamelCase , _lowerCamelCase ) )
lowerCamelCase__ : Tuple = actual_strassen(matrix_addition(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase )
lowerCamelCase__ : Optional[int] = actual_strassen(matrix_addition(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase )
lowerCamelCase__ : int = actual_strassen(_lowerCamelCase , matrix_subtraction(_lowerCamelCase , _lowerCamelCase ) )
lowerCamelCase__ : Any = actual_strassen(matrix_addition(_lowerCamelCase , _lowerCamelCase ) , matrix_addition(_lowerCamelCase , _lowerCamelCase ) )
lowerCamelCase__ : Any = actual_strassen(matrix_subtraction(_lowerCamelCase , _lowerCamelCase ) , matrix_addition(_lowerCamelCase , _lowerCamelCase ) )
lowerCamelCase__ : List[str] = actual_strassen(matrix_subtraction(_lowerCamelCase , _lowerCamelCase ) , matrix_addition(_lowerCamelCase , _lowerCamelCase ) )
lowerCamelCase__ : int = matrix_addition(matrix_subtraction(matrix_addition(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase ) , _lowerCamelCase )
lowerCamelCase__ : Dict = matrix_addition(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : List[Any] = matrix_addition(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : Dict = matrix_subtraction(matrix_subtraction(matrix_addition(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase ) , _lowerCamelCase )
# construct the new matrix from our 4 quadrants
lowerCamelCase__ : Optional[int] = []
for i in range(len(_lowerCamelCase ) ):
new_matrix.append(top_left[i] + top_right[i] )
for i in range(len(_lowerCamelCase ) ):
new_matrix.append(bot_left[i] + bot_right[i] )
return new_matrix
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if matrix_dimensions(_lowerCamelCase )[1] != matrix_dimensions(_lowerCamelCase )[0]:
lowerCamelCase__ : List[Any] = (
'Unable to multiply these matrices, please check the dimensions.\n'
f'''Matrix A: {matrixa}\n'''
f'''Matrix B: {matrixa}'''
)
raise Exception(_lowerCamelCase )
lowerCamelCase__ : Optional[int] = matrix_dimensions(_lowerCamelCase )
lowerCamelCase__ : Dict = matrix_dimensions(_lowerCamelCase )
if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]:
return [matrixa, matrixa]
lowerCamelCase__ : int = max(*_lowerCamelCase , *_lowerCamelCase )
lowerCamelCase__ : Optional[Any] = int(math.pow(2 , math.ceil(math.loga(_lowerCamelCase ) ) ) )
lowerCamelCase__ : Union[str, Any] = matrixa
lowerCamelCase__ : Union[str, Any] = matrixa
# Adding zeros to the matrices so that the arrays' dimensions are the same and
# also a power of 2
for i in range(0 , _lowerCamelCase ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , _lowerCamelCase ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
if i < dimensiona[0]:
for _ in range(dimensiona[1] , _lowerCamelCase ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
lowerCamelCase__ : List[str] = actual_strassen(_lowerCamelCase , _lowerCamelCase )
# Removing the additional zeros
for i in range(0 , _lowerCamelCase ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , _lowerCamelCase ):
final_matrix[i].pop()
else:
final_matrix.pop()
return final_matrix
if __name__ == "__main__":
A_ : Dict = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
A_ : Union[str, Any] = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
print(strassen(matrixa, matrixa))
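# For reference, a minimal self-contained 2x2 Strassen sketch using the canonical
# seven products (plain names assumed; this mirrors what actual_strassen computes
# blockwise on the split quadrants above):
def strassen_2x2(a, b):
    m1 = (a[0][0] + a[1][1]) * (b[0][0] + b[1][1])
    m2 = (a[1][0] + a[1][1]) * b[0][0]
    m3 = a[0][0] * (b[0][1] - b[1][1])
    m4 = a[1][1] * (b[1][0] - b[0][0])
    m5 = (a[0][0] + a[0][1]) * b[1][1]
    m6 = (a[1][0] - a[0][0]) * (b[0][0] + b[0][1])
    m7 = (a[0][1] - a[1][1]) * (b[1][0] + b[1][1])
    return [
        [m1 + m4 - m5 + m7, m3 + m5],
        [m2 + m4, m1 - m2 + m3 + m6],
    ]  # e.g. strassen_2x2([[1, 2], [3, 4]], [[5, 6], [7, 8]]) == [[19, 22], [43, 50]]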
| 362
|
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : list[list[int]] = []
lowerCamelCase__ : list[int] = []
lowerCamelCase__ : List[str] = 0
lowerCamelCase__ : List[Any] = sum(_lowerCamelCase )
create_state_space_tree(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
return result
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
if sum(_lowerCamelCase ) > max_sum or (remaining_nums_sum + sum(_lowerCamelCase )) < max_sum:
return
if sum(_lowerCamelCase ) == max_sum:
result.append(_lowerCamelCase )
return
for index in range(_lowerCamelCase , len(_lowerCamelCase ) ):
create_state_space_tree(
_lowerCamelCase , _lowerCamelCase , index + 1 , [*path, nums[index]] , _lowerCamelCase , remaining_nums_sum - nums[index] , )
A_ : Optional[Any] = [3, 34, 4, 12, 5, 2]
A_ : List[str] = 9
A_ : List[Any] = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
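# For the inputs above, the depth-first search should find exactly the subsets
# [3, 4, 2] and [4, 5] (each sums to max_sum = 9), in discovery order.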
| 316
| 0
|
from math import pow
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
if current_sum == needed_sum:
# If the sum of the powers is equal to needed_sum, then we have a solution.
solutions_count += 1
return current_sum, solutions_count
lowerCamelCase__ : Dict = int(pow(_lowerCamelCase , _lowerCamelCase ) )
if current_sum + i_to_n <= needed_sum:
# If the sum of the powers is less than needed_sum, then continue adding powers.
current_sum += i_to_n
lowerCamelCase__ : List[Any] = backtrack(
_lowerCamelCase , _lowerCamelCase , current_number + 1 , _lowerCamelCase , _lowerCamelCase )
current_sum -= i_to_n
if i_to_n < needed_sum:
# If the power of i is less than needed_sum, then also try skipping to the next base number.
lowerCamelCase__ : List[str] = backtrack(
_lowerCamelCase , _lowerCamelCase , current_number + 1 , _lowerCamelCase , _lowerCamelCase )
return current_sum, solutions_count
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
raise ValueError(
'Invalid input\n'
'needed_sum must be between 1 and 1000, power between 2 and 10.' )
return backtrack(_lowerCamelCase , _lowerCamelCase , 1 , 0 , 0 )[1] # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
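# Worked example (hedged; `solve` is a readable stand-in name for the validating
# wrapper above): expressing 100 as a sum of distinct squares has exactly three
# solutions, 10^2, 6^2 + 8^2, and 1^2 + 3^2 + 4^2 + 5^2 + 7^2, so solve(100, 2) -> 3.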
| 363
|
"""simple docstring"""
from __future__ import annotations
import queue
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = data
lowerCamelCase__ : Optional[int] = None
lowerCamelCase__ : List[Any] = None
def lowerCamelCase_ ( ):
print('\n********Press N to stop entering at any point of time********\n' )
lowerCamelCase__ : str = input('Enter the value of the root node: ' ).strip().lower()
lowerCamelCase__ : queue.Queue = queue.Queue()
lowerCamelCase__ : Optional[Any] = TreeNode(int(_lowerCamelCase ) )
q.put(_lowerCamelCase )
while not q.empty():
lowerCamelCase__ : List[Any] = q.get()
lowerCamelCase__ : str = f'''Enter the left node of {node_found.data}: '''
lowerCamelCase__ : Dict = input(_lowerCamelCase ).strip().lower() or 'n'
if check == "n":
return tree_node
lowerCamelCase__ : str = TreeNode(int(_lowerCamelCase ) )
lowerCamelCase__ : Dict = left_node
q.put(_lowerCamelCase )
lowerCamelCase__ : List[str] = f'''Enter the right node of {node_found.data}: '''
lowerCamelCase__ : List[str] = input(_lowerCamelCase ).strip().lower() or 'n'
if check == "n":
return tree_node
lowerCamelCase__ : Optional[int] = TreeNode(int(_lowerCamelCase ) )
lowerCamelCase__ : Any = right_node
q.put(_lowerCamelCase )
raise
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
print(node.data , end=',' )
pre_order(node.left )
pre_order(node.right )
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
in_order(node.left )
print(node.data , end=',' )
in_order(node.right )
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=',' )
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
lowerCamelCase__ : queue.Queue = queue.Queue()
q.put(_lowerCamelCase )
while not q.empty():
lowerCamelCase__ : Any = q.get()
print(node_dequeued.data , end=',' )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
lowerCamelCase__ : queue.Queue = queue.Queue()
q.put(_lowerCamelCase )
while not q.empty():
lowerCamelCase__ : List[Any] = []
while not q.empty():
lowerCamelCase__ : str = q.get()
print(node_dequeued.data , end=',' )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(_lowerCamelCase )
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
lowerCamelCase__ : list[TreeNode] = []
lowerCamelCase__ : int = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=',' )
stack.append(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = n.left
# end of while means current node doesn't have left child
lowerCamelCase__ : List[Any] = stack.pop()
# start to traverse its right child
lowerCamelCase__ : Optional[Any] = n.right
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
lowerCamelCase__ : list[TreeNode] = []
lowerCamelCase__ : int = node
while n or stack:
while n:
stack.append(_lowerCamelCase )
lowerCamelCase__ : List[str] = n.left
lowerCamelCase__ : Tuple = stack.pop()
print(n.data , end=',' )
lowerCamelCase__ : Union[str, Any] = n.right
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
lowerCamelCase__ , lowerCamelCase__ : Any = [], []
lowerCamelCase__ : int = node
stacka.append(_lowerCamelCase )
while stacka: # to find the reversed order of post order, store it in stack2
lowerCamelCase__ : List[str] = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(_lowerCamelCase )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end=',' )
def lowerCamelCase_ ( _lowerCamelCase = "" , _lowerCamelCase=50 , _lowerCamelCase="*" ):
if not s:
return "\n" + width * char
lowerCamelCase__ , lowerCamelCase__ : Dict = divmod(width - len(_lowerCamelCase ) - 2 , 2 )
return f'''{left * char} {s} {(left + extra) * char}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("Binary Tree Traversals"))
A_ : TreeNode = build_tree()
print(prompt("Pre Order Traversal"))
pre_order(node)
print(prompt() + "\n")
print(prompt("In Order Traversal"))
in_order(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal"))
post_order(node)
print(prompt() + "\n")
print(prompt("Level Order Traversal"))
level_order(node)
print(prompt() + "\n")
print(prompt("Actual Level Order Traversal"))
level_order_actual(node)
print("*" * 50 + "\n")
print(prompt("Pre Order Traversal - Iteration Version"))
pre_order_iter(node)
print(prompt() + "\n")
print(prompt("In Order Traversal - Iteration Version"))
in_order_iter(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal - Iteration Version"))
post_order_iter(node)
print(prompt())
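# A non-interactive sketch (hedged; plain names mirror the class and traversal
# functions defined above):
# root = TreeNode(1)
# root.left, root.right = TreeNode(2), TreeNode(3)
# in_order(root)    # prints: 2,1,3,
# pre_order(root)   # prints: 1,2,3,
# post_order(root)  # prints: 2,3,1,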
| 316
| 0
|
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : List[str] = ''
for i in table:
res += inp[i - 1]
return res
def lowerCamelCase_ ( _lowerCamelCase ):
return data[1:] + data[0]
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : str = ''
for i in range(len(_lowerCamelCase ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Dict = int('0b' + data[0] + data[-1] , 2 )
lowerCamelCase__ : List[str] = int('0b' + data[1:3] , 2 )
return bin(s[row][col] )[2:]
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : int = message[:4]
lowerCamelCase__ : Tuple = message[4:]
lowerCamelCase__ : Optional[int] = apply_table(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = xor(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : List[str] = apply_sbox(_lowerCamelCase , temp[:4] ) # noqa: E741
lowerCamelCase__ : Optional[Any] = apply_sbox(_lowerCamelCase , temp[4:] )
lowerCamelCase__ : List[str] = '0' * (2 - len(_lowerCamelCase )) + l # noqa: E741
lowerCamelCase__ : Dict = '0' * (2 - len(_lowerCamelCase )) + r
lowerCamelCase__ : Union[str, Any] = apply_table(l + r , _lowerCamelCase )
lowerCamelCase__ : List[str] = xor(_lowerCamelCase , _lowerCamelCase )
return temp + right
if __name__ == "__main__":
A_ : str = input("Enter 10 bit key: ")
A_ : List[str] = input("Enter 8 bit message: ")
A_ : Optional[Any] = [6, 3, 7, 4, 8, 5, 10, 9]
A_ : List[str] = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
A_ : List[Any] = [2, 4, 3, 1]
A_ : Tuple = [2, 6, 3, 1, 4, 8, 5, 7]
A_ : Optional[int] = [4, 1, 3, 5, 7, 2, 8, 6]
A_ : List[Any] = [4, 1, 2, 3, 2, 3, 4, 1]
A_ : Union[str, Any] = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
A_ : Union[str, Any] = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
A_ : Optional[Any] = apply_table(key, paa_table)
A_ : Any = temp[:5]
A_ : Any = temp[5:]
A_ : List[str] = left_shift(left)
A_ : Union[str, Any] = left_shift(right)
A_ : Dict = apply_table(left + right, pa_table)
A_ : Optional[int] = left_shift(left)
A_ : Union[str, Any] = left_shift(right)
A_ : List[Any] = left_shift(left)
A_ : str = left_shift(right)
A_ : Optional[Any] = apply_table(left + right, pa_table)
# encryption
A_ : str = apply_table(message, IP)
A_ : str = function(expansion, sa, sa, keya, temp)
A_ : Union[str, Any] = temp[4:] + temp[:4]
A_ : List[str] = function(expansion, sa, sa, keya, temp)
A_ : int = apply_table(temp, IP_inv)
print("Cipher text is:", CT)
# decryption
A_ : int = apply_table(CT, IP)
A_ : Any = function(expansion, sa, sa, keya, temp)
A_ : str = temp[4:] + temp[:4]
A_ : List[Any] = function(expansion, sa, sa, keya, temp)
A_ : List[str] = apply_table(temp, IP_inv)
print("Plain text after decypting is:", PT)
| 364
|
"""simple docstring"""
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
A_ : Optional[Any] = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
A_ : List[Any] = {
# fairseq:
"wmt19-ru-en": {"length_penalty": 1.1},
"wmt19-en-ru": {"length_penalty": 1.15},
"wmt19-en-de": {"length_penalty": 1.0},
"wmt19-de-en": {"length_penalty": 1.1},
# allenai:
"wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
"wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
"wmt16-en-de-12-1": {"length_penalty": 0.8},
"wmt19-de-en-6-6-base": {"length_penalty": 0.6},
"wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
# this remaps the different models to their organization names
A_ : str = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
A_ : str = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
A_ : Optional[Any] = "allenai"
def lowerCamelCase_ ( _lowerCamelCase ):
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
lowerCamelCase__ : List[Any] = dict((re.sub(r'@@$' , '' , _lowerCamelCase ), v) if k.endswith('@@' ) else (re.sub(r'$' , '</w>' , _lowerCamelCase ), v) for k, v in d.items() )
lowerCamelCase__ : int = '<s> <pad> </s> <unk>'.split()
# restore the special tokens
for k in keep_keys:
del da[f'''{k}</w>''']
lowerCamelCase__ : List[str] = d[k] # restore
return da
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
# prep
assert os.path.exists(_lowerCamelCase )
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
print(f'''Writing results to {pytorch_dump_folder_path}''' )
# handle various types of models
lowerCamelCase__ : Optional[int] = basename(_lowerCamelCase )
lowerCamelCase__ : str = dirname(_lowerCamelCase )
lowerCamelCase__ : Any = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
lowerCamelCase__ : int = cls.hub_models()
lowerCamelCase__ : str = {'bpe': 'fastbpe', 'tokenizer': 'moses'}
lowerCamelCase__ : Optional[Any] = '.'
# note: since this model dump is old, fairseq upgraded its model format some
# time later; it performs a whole lot of rewrites and splits on the saved
# weights, so we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(f'''using checkpoint {checkpoint_file}''' )
lowerCamelCase__ : Any = hub_utils.from_pretrained(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , archive_map=_lowerCamelCase , **_lowerCamelCase )
lowerCamelCase__ : List[str] = vars(chkpt['args']['model'] )
lowerCamelCase__ : Optional[Any] = args['source_lang']
lowerCamelCase__ : List[str] = args['target_lang']
lowerCamelCase__ : List[str] = dirname(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = basename(_lowerCamelCase )
# dicts
lowerCamelCase__ : Optional[Any] = os.path.join(_lowerCamelCase , f'''dict.{src_lang}.txt''' )
lowerCamelCase__ : Optional[Any] = os.path.join(_lowerCamelCase , f'''dict.{tgt_lang}.txt''' )
lowerCamelCase__ : Dict = Dictionary.load(_lowerCamelCase )
lowerCamelCase__ : List[Any] = rewrite_dict_keys(src_dict.indices )
lowerCamelCase__ : int = len(_lowerCamelCase )
lowerCamelCase__ : List[Any] = os.path.join(_lowerCamelCase , 'vocab-src.json' )
print(f'''Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowerCamelCase , ensure_ascii=_lowerCamelCase , indent=_lowerCamelCase ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
lowerCamelCase__ : Optional[int] = True
for k in src_vocab.keys():
if not k.islower():
lowerCamelCase__ : int = False
break
lowerCamelCase__ : str = Dictionary.load(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = rewrite_dict_keys(tgt_dict.indices )
lowerCamelCase__ : Optional[Any] = len(_lowerCamelCase )
lowerCamelCase__ : Dict = os.path.join(_lowerCamelCase , 'vocab-tgt.json' )
print(f'''Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowerCamelCase , ensure_ascii=_lowerCamelCase , indent=_lowerCamelCase ) )
# merges_file (bpecodes)
lowerCamelCase__ : List[Any] = os.path.join(_lowerCamelCase , VOCAB_FILES_NAMES['merges_file'] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
lowerCamelCase__ : Optional[int] = os.path.join(_lowerCamelCase , _lowerCamelCase )
if os.path.exists(_lowerCamelCase ):
break
with open(_lowerCamelCase , encoding='utf-8' ) as fin:
lowerCamelCase__ : Union[str, Any] = fin.read()
lowerCamelCase__ : Any = re.sub(r' \d+$' , '' , _lowerCamelCase , 0 , re.M ) # remove frequency number
print(f'''Generating {merges_file}''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as fout:
fout.write(_lowerCamelCase )
# model config
lowerCamelCase__ : Dict = os.path.join(_lowerCamelCase , 'config.json' )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", f'''need to extend tokenizer to support bpe={args['bpe']}'''
assert args["tokenizer"] == "moses", f'''need to extend tokenizer to support bpe={args['tokenizer']}'''
lowerCamelCase__ : Optional[int] = {
'architectures': ['FSMTForConditionalGeneration'],
'model_type': 'fsmt',
'activation_dropout': args['activation_dropout'],
'activation_function': 'relu',
'attention_dropout': args['attention_dropout'],
'd_model': args['decoder_embed_dim'],
'dropout': args['dropout'],
'init_std': 0.02,
'max_position_embeddings': args['max_source_positions'],
'num_hidden_layers': args['encoder_layers'],
'src_vocab_size': src_vocab_size,
'tgt_vocab_size': tgt_vocab_size,
'langs': [src_lang, tgt_lang],
'encoder_attention_heads': args['encoder_attention_heads'],
'encoder_ffn_dim': args['encoder_ffn_embed_dim'],
'encoder_layerdrop': args['encoder_layerdrop'],
'encoder_layers': args['encoder_layers'],
'decoder_attention_heads': args['decoder_attention_heads'],
'decoder_ffn_dim': args['decoder_ffn_embed_dim'],
'decoder_layerdrop': args['decoder_layerdrop'],
'decoder_layers': args['decoder_layers'],
'bos_token_id': 0,
'pad_token_id': 1,
'eos_token_id': 2,
'is_encoder_decoder': True,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_all_embeddings'],
}
# good hparam defaults to start with
lowerCamelCase__ : str = 5
lowerCamelCase__ : Tuple = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
lowerCamelCase__ : List[str] = best_score_hparams[model_dir]['length_penalty']
else:
lowerCamelCase__ : List[Any] = 1.0
print(f'''Generating {fsmt_model_config_file}''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowerCamelCase , ensure_ascii=_lowerCamelCase , indent=_lowerCamelCase ) )
# tokenizer config
lowerCamelCase__ : Dict = os.path.join(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : int = {
'langs': [src_lang, tgt_lang],
'model_max_length': 1024,
'do_lower_case': do_lower_case,
}
print(f'''Generating {fsmt_tokenizer_config_file}''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowerCamelCase , ensure_ascii=_lowerCamelCase , indent=_lowerCamelCase ) )
# model
lowerCamelCase__ : List[str] = chkpt['models'][0]
lowerCamelCase__ : Optional[Any] = model.state_dict()
# rename keys to start with 'model.'
lowerCamelCase__ : str = OrderedDict(('model.' + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
lowerCamelCase__ : int = [
'model.model',
'model.encoder.version',
'model.decoder.version',
'model.encoder_embed_tokens.weight',
'model.decoder_embed_tokens.weight',
'model.encoder.embed_positions._float_tensor',
'model.decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
model_state_dict.pop(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : Any = FSMTConfig.from_pretrained(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = FSMTForConditionalGeneration(_lowerCamelCase )
# check that it loads ok
model_new.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
# save
lowerCamelCase__ : List[Any] = os.path.join(_lowerCamelCase , _lowerCamelCase )
print(f'''Generating {pytorch_weights_dump_path}''' )
torch.save(_lowerCamelCase , _lowerCamelCase )
print('Conversion is done!' )
print('\nLast step is to upload the files to s3' )
print(f'''cd {data_root}''' )
print(f'''transformers-cli upload {model_dir}''' )
if __name__ == "__main__":
A_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fsmt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
A_ : Dict = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
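# Illustrative invocation (hedged: the script file name and all paths are placeholders):
# python convert_fsmt_original_pytorch_checkpoint_to_pytorch.py \
#     --fsmt_checkpoint_path /path/to/wmt19.ru-en/model.pt \
#     --pytorch_dump_folder_path /path/to/output/wmt19-ru-en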
| 316
| 0
|
"""simple docstring"""
from math import pi, sqrt
def lowerCamelCase_ ( _lowerCamelCase ):
if num <= 0:
raise ValueError('math domain error' )
if num > 171.5:
raise OverflowError('math range error' )
elif num - int(_lowerCamelCase ) not in (0, 0.5):
raise NotImplementedError('num must be an integer or a half-integer' )
elif num == 0.5:
return sqrt(pi )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def lowerCamelCase_ ( ):
assert gamma(0.5 ) == sqrt(pi )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
A_ : Any = 1.0
while num:
A_ : Tuple = float(input("Gamma of: "))
print(f"gamma({num}) = {gamma(num)}")
print("\nEnter 0 to exit...")
| 365
|
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class a_ ( snake_case_ ):
'''simple docstring'''
@staticmethod
@abstractmethod
def a__ (lowerCamelCase_ ):
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def a__ (self ):
'''simple docstring'''
raise NotImplementedError()
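# A hedged sketch of a concrete subclass (the command name and body are invented
# for illustration; the two hooks mirror the abstract methods above, and `parser`
# is assumed to be an argparse subparsers action):
# class EnvCommand(BaseTransformersCLICommand):
#     @staticmethod
#     def register_subcommand(parser):
#         parser.add_parser('env').set_defaults(func=lambda args: EnvCommand().run())
#     def run(self):
#         print('environment info')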
| 316
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
A_ : str = logging.get_logger(__name__)
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ['pixel_values']
def __init__(self, lowerCamelCase_ = True, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = PILImageResampling.BILINEAR, lowerCamelCase_ = True, lowerCamelCase_ = 1 / 2_5_5, lowerCamelCase_ = True, lowerCamelCase_ = None, lowerCamelCase_ = None, **lowerCamelCase_, ):
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = size if size is not None else {'shortest_edge': 3_8_4}
lowerCamelCase__ : List[str] = get_size_dict(lowerCamelCase_, default_to_square=lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = do_resize
lowerCamelCase__ : str = size
# Default value set here for backwards compatibility where the value in config is None
lowerCamelCase__ : str = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6
lowerCamelCase__ : List[Any] = resample
lowerCamelCase__ : Optional[int] = do_rescale
lowerCamelCase__ : Union[str, Any] = rescale_factor
lowerCamelCase__ : List[str] = do_normalize
lowerCamelCase__ : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCamelCase__ : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ = PILImageResampling.BICUBIC, lowerCamelCase_ = None, **lowerCamelCase_, ):
'''simple docstring'''
lowerCamelCase__ : List[str] = get_size_dict(lowerCamelCase_, default_to_square=lowerCamelCase_ )
if "shortest_edge" not in size:
raise ValueError(f'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' )
lowerCamelCase__ : Optional[int] = size['shortest_edge']
if shortest_edge < 3_8_4:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
lowerCamelCase__ : List[str] = int(shortest_edge / crop_pct )
lowerCamelCase__ : Tuple = get_resize_output_image_size(lowerCamelCase_, size=lowerCamelCase_, default_to_square=lowerCamelCase_ )
lowerCamelCase__ : List[Any] = resize(image=lowerCamelCase_, size=lowerCamelCase_, resample=lowerCamelCase_, data_format=lowerCamelCase_, **lowerCamelCase_ )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=lowerCamelCase_, size=(shortest_edge, shortest_edge), data_format=lowerCamelCase_, **lowerCamelCase_ )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
lowerCamelCase_, size=(shortest_edge, shortest_edge), resample=lowerCamelCase_, data_format=lowerCamelCase_, **lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ = None, **lowerCamelCase_, ):
'''simple docstring'''
return rescale(lowerCamelCase_, scale=lowerCamelCase_, data_format=lowerCamelCase_, **lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ = None, **lowerCamelCase_, ):
'''simple docstring'''
return normalize(lowerCamelCase_, mean=lowerCamelCase_, std=lowerCamelCase_, data_format=lowerCamelCase_, **lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = ChannelDimension.FIRST, **lowerCamelCase_, ):
'''simple docstring'''
lowerCamelCase__ : int = do_resize if do_resize is not None else self.do_resize
lowerCamelCase__ : Union[str, Any] = crop_pct if crop_pct is not None else self.crop_pct
lowerCamelCase__ : str = resample if resample is not None else self.resample
lowerCamelCase__ : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase__ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase__ : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase__ : Optional[Any] = image_mean if image_mean is not None else self.image_mean
lowerCamelCase__ : Tuple = image_std if image_std is not None else self.image_std
lowerCamelCase__ : str = size if size is not None else self.size
lowerCamelCase__ : Tuple = get_size_dict(lowerCamelCase_, default_to_square=lowerCamelCase_ )
lowerCamelCase__ : int = make_list_of_images(lowerCamelCase_ )
if not valid_images(lowerCamelCase_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None:
raise ValueError('crop_pct must be specified if size < 384.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
lowerCamelCase__ : Any = [to_numpy_array(lowerCamelCase_ ) for image in images]
if do_resize:
lowerCamelCase__ : Optional[int] = [self.resize(image=lowerCamelCase_, size=lowerCamelCase_, crop_pct=lowerCamelCase_, resample=lowerCamelCase_ ) for image in images]
if do_rescale:
lowerCamelCase__ : Optional[Any] = [self.rescale(image=lowerCamelCase_, scale=lowerCamelCase_ ) for image in images]
if do_normalize:
lowerCamelCase__ : Tuple = [self.normalize(image=lowerCamelCase_, mean=lowerCamelCase_, std=lowerCamelCase_ ) for image in images]
lowerCamelCase__ : Optional[Any] = [to_channel_dimension_format(lowerCamelCase_, lowerCamelCase_ ) for image in images]
lowerCamelCase__ : List[str] = {'pixel_values': images}
return BatchFeature(data=lowerCamelCase_, tensor_type=lowerCamelCase_ )
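# A hedged usage sketch (the concrete class name is an assumption; only the
# preprocess flow is taken from this file). With shortest_edge == 384 the "warp"
# branch applies, so images are resized straight to 384x384 without cropping:
# import numpy as np
# processor = <this image processor class>(size={'shortest_edge': 384})
# batch = processor(images=np.zeros((512, 640, 3), dtype=np.uint8), return_tensors='np')
# batch['pixel_values'].shape  # -> (1, 3, 384, 384)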
| 366
|
"""simple docstring"""
import re
def lowerCamelCase_ ( _lowerCamelCase ):
if len(re.findall('[ATCG]' , _lowerCamelCase ) ) != len(_lowerCamelCase ):
raise ValueError('Invalid Strand' )
return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
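# Example: the complementary strand of 'ATCGA' is 'TAGCT'; any character outside
# {A, T, C, G} raises ValueError('Invalid Strand').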
| 316
| 0
|
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = IFInpaintingSuperResolutionPipeline
lowerCamelCase__ : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
lowerCamelCase__ : Tuple = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'original_image'} )
lowerCamelCase__ : Optional[Any] = PipelineTesterMixin.required_optional_params - {'latents'}
def a__ (self ):
'''simple docstring'''
return self._get_superresolution_dummy_components()
def a__ (self, lowerCamelCase_, lowerCamelCase_=0 ):
'''simple docstring'''
if str(lowerCamelCase_ ).startswith('mps' ):
lowerCamelCase__ : Optional[Any] = torch.manual_seed(lowerCamelCase_ )
else:
lowerCamelCase__ : Optional[Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
lowerCamelCase__ : Tuple = floats_tensor((1, 3, 1_6, 1_6), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = floats_tensor((1, 3, 3_2, 3_2), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = floats_tensor((1, 3, 3_2, 3_2), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'original_image': original_image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available(), reason='XFormers attention is only available with CUDA and `xformers` installed', )
def a__ (self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def a__ (self ):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda', reason='float16 requires CUDA' )
def a__ (self ):
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1e-1 )
def a__ (self ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def a__ (self ):
'''simple docstring'''
self._test_save_load_local()
def a__ (self ):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1e-2, )
| 367
|
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Any = s.rsplit(_lowerCamelCase , _lowerCamelCase )
return new.join(_lowerCamelCase )
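# e.g. rreplace('group_1.res_path.0.w', '.w', '.weight', 1) -> 'group_1.res_path.0.weight'
# (split from the right at most `count` times, then rejoin with the replacement)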
def lowerCamelCase_ ( _lowerCamelCase ):
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() )
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : List[Any] = {}
lowerCamelCase__ : Any = ['group_1', 'group_2', 'group_3', 'group_4']
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
lowerCamelCase__ : Union[str, Any] = key.replace(f'''{group_key}.''' , f'''{group_key}.group.''' )
if "res_path" in key:
lowerCamelCase__ : Dict = key.replace('res_path.' , 'res_path.path.' )
if key.endswith('.w' ):
lowerCamelCase__ : str = rreplace(_lowerCamelCase , '.w' , '.weight' , 1 )
if key.endswith('.b' ):
lowerCamelCase__ : Optional[Any] = rreplace(_lowerCamelCase , '.b' , '.bias' , 1 )
lowerCamelCase__ : int = value.float()
return upgrade
@torch.no_grad()
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=True ):
from dall_e import Encoder
lowerCamelCase__ : List[str] = Encoder()
if os.path.exists(_lowerCamelCase ):
lowerCamelCase__ : Optional[int] = torch.load(_lowerCamelCase )
else:
lowerCamelCase__ : List[Any] = torch.hub.load_state_dict_from_url(_lowerCamelCase )
if isinstance(_lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : List[Any] = ckpt.state_dict()
encoder.load_state_dict(_lowerCamelCase )
if config_path is not None:
lowerCamelCase__ : Union[str, Any] = FlavaImageCodebookConfig.from_pretrained(_lowerCamelCase )
else:
lowerCamelCase__ : Dict = FlavaImageCodebookConfig()
lowerCamelCase__ : Tuple = FlavaImageCodebook(_lowerCamelCase ).eval()
lowerCamelCase__ : List[str] = encoder.state_dict()
lowerCamelCase__ : Any = upgrade_state_dict(_lowerCamelCase )
hf_model.load_state_dict(_lowerCamelCase )
lowerCamelCase__ : Optional[Any] = hf_model.state_dict()
lowerCamelCase__ : Optional[int] = count_parameters(_lowerCamelCase )
lowerCamelCase__ : Optional[int] = count_parameters(_lowerCamelCase )
assert torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-3 )
if save_checkpoint:
hf_model.save_pretrained(_lowerCamelCase )
else:
return hf_state_dict
if __name__ == "__main__":
A_ : Tuple = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
A_ : str = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 316
| 0
|
"""simple docstring"""
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class a_ ( nn.Module ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=0.0, lowerCamelCase_ = None, lowerCamelCase_ = "geglu", lowerCamelCase_ = None, lowerCamelCase_ = False, lowerCamelCase_ = False, lowerCamelCase_ = False, lowerCamelCase_ = False, lowerCamelCase_ = True, lowerCamelCase_ = "layer_norm", lowerCamelCase_ = False, ):
'''simple docstring'''
super().__init__()
lowerCamelCase__ : Union[str, Any] = only_cross_attention
lowerCamelCase__ : Dict = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm_zero'
lowerCamelCase__ : Tuple = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm'
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
f'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'''
f''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
lowerCamelCase__ : Optional[int] = AdaLayerNorm(lowerCamelCase_, lowerCamelCase_ )
elif self.use_ada_layer_norm_zero:
lowerCamelCase__ : Optional[int] = AdaLayerNormZero(lowerCamelCase_, lowerCamelCase_ )
else:
lowerCamelCase__ : List[Any] = nn.LayerNorm(lowerCamelCase_, elementwise_affine=lowerCamelCase_ )
lowerCamelCase__ : List[str] = Attention(
query_dim=lowerCamelCase_, heads=lowerCamelCase_, dim_head=lowerCamelCase_, dropout=lowerCamelCase_, bias=lowerCamelCase_, cross_attention_dim=cross_attention_dim if only_cross_attention else None, upcast_attention=lowerCamelCase_, )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
lowerCamelCase__ : Optional[int] = (
AdaLayerNorm(lowerCamelCase_, lowerCamelCase_ )
if self.use_ada_layer_norm
else nn.LayerNorm(lowerCamelCase_, elementwise_affine=lowerCamelCase_ )
)
lowerCamelCase__ : List[Any] = Attention(
query_dim=lowerCamelCase_, cross_attention_dim=cross_attention_dim if not double_self_attention else None, heads=lowerCamelCase_, dim_head=lowerCamelCase_, dropout=lowerCamelCase_, bias=lowerCamelCase_, upcast_attention=lowerCamelCase_, ) # is self-attn if encoder_hidden_states is none
else:
lowerCamelCase__ : Optional[Any] = None
lowerCamelCase__ : Dict = None
# 3. Feed-forward
lowerCamelCase__ : str = nn.LayerNorm(lowerCamelCase_, elementwise_affine=lowerCamelCase_ )
lowerCamelCase__ : List[Any] = FeedForward(lowerCamelCase_, dropout=lowerCamelCase_, activation_fn=lowerCamelCase_, final_dropout=lowerCamelCase_ )
# let chunk size default to None
lowerCamelCase__ : Optional[Any] = None
lowerCamelCase__ : str = 0
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : int = chunk_size
lowerCamelCase__ : Optional[Any] = dim
def a__ (self, lowerCamelCase_, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, ):
'''simple docstring'''
if self.use_ada_layer_norm:
lowerCamelCase__ : Optional[int] = self.norma(lowerCamelCase_, lowerCamelCase_ )
elif self.use_ada_layer_norm_zero:
lowerCamelCase__ : Tuple = self.norma(
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, hidden_dtype=hidden_states.dtype )
else:
lowerCamelCase__ : int = self.norma(lowerCamelCase_ )
lowerCamelCase__ : Any = cross_attention_kwargs if cross_attention_kwargs is not None else {}
lowerCamelCase__ : str = self.attna(
lowerCamelCase_, encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None, attention_mask=lowerCamelCase_, **lowerCamelCase_, )
if self.use_ada_layer_norm_zero:
lowerCamelCase__ : int = gate_msa.unsqueeze(1 ) * attn_output
lowerCamelCase__ : Union[str, Any] = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
lowerCamelCase__ : Tuple = (
self.norma(lowerCamelCase_, lowerCamelCase_ ) if self.use_ada_layer_norm else self.norma(lowerCamelCase_ )
)
lowerCamelCase__ : Dict = self.attna(
lowerCamelCase_, encoder_hidden_states=lowerCamelCase_, attention_mask=lowerCamelCase_, **lowerCamelCase_, )
lowerCamelCase__ : str = attn_output + hidden_states
# 3. Feed-forward
lowerCamelCase__ : Optional[int] = self.norma(lowerCamelCase_ )
if self.use_ada_layer_norm_zero:
lowerCamelCase__ : Optional[Any] = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
f'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' )
lowerCamelCase__ : int = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
lowerCamelCase__ : Optional[Any] = torch.cat(
[self.ff(lowerCamelCase_ ) for hid_slice in norm_hidden_states.chunk(lowerCamelCase_, dim=self._chunk_dim )], dim=self._chunk_dim, )
else:
lowerCamelCase__ : List[str] = self.ff(lowerCamelCase_ )
if self.use_ada_layer_norm_zero:
lowerCamelCase__ : Optional[Any] = gate_mlp.unsqueeze(1 ) * ff_output
lowerCamelCase__ : int = ff_output + hidden_states
return hidden_states
class a_ ( nn.Module ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ = None, lowerCamelCase_ = 4, lowerCamelCase_ = 0.0, lowerCamelCase_ = "geglu", lowerCamelCase_ = False, ):
'''simple docstring'''
super().__init__()
lowerCamelCase__ : Tuple = int(dim * mult )
lowerCamelCase__ : List[Any] = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
lowerCamelCase__ : Optional[Any] = GELU(lowerCamelCase_, lowerCamelCase_ )
if activation_fn == "gelu-approximate":
lowerCamelCase__ : Dict = GELU(lowerCamelCase_, lowerCamelCase_, approximate='tanh' )
elif activation_fn == "geglu":
lowerCamelCase__ : str = GEGLU(lowerCamelCase_, lowerCamelCase_ )
elif activation_fn == "geglu-approximate":
lowerCamelCase__ : Dict = ApproximateGELU(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = nn.ModuleList([] )
# project in
self.net.append(lowerCamelCase_ )
# project dropout
self.net.append(nn.Dropout(lowerCamelCase_ ) )
# project out
self.net.append(nn.Linear(lowerCamelCase_, lowerCamelCase_ ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(lowerCamelCase_ ) )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
for module in self.net:
lowerCamelCase__ : Optional[Any] = module(lowerCamelCase_ )
return hidden_states
class a_ ( nn.Module ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ = "none" ):
'''simple docstring'''
super().__init__()
lowerCamelCase__ : Optional[int] = nn.Linear(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : str = approximate
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(lowerCamelCase_, approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ), approximate=self.approximate ).to(dtype=gate.dtype )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.proj(lowerCamelCase_ )
lowerCamelCase__ : List[str] = self.gelu(lowerCamelCase_ )
return hidden_states
class a_ ( nn.Module ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
super().__init__()
lowerCamelCase__ : Union[str, Any] = nn.Linear(lowerCamelCase_, dim_out * 2 )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(lowerCamelCase_ )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.proj(lowerCamelCase_ ).chunk(2, dim=-1 )
return hidden_states * self.gelu(lowerCamelCase_ )
class a_ ( nn.Module ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
super().__init__()
lowerCamelCase__ : Any = nn.Linear(lowerCamelCase_, lowerCamelCase_ )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.proj(lowerCamelCase_ )
return x * torch.sigmoid(1.702 * x )
class a_ ( nn.Module ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
super().__init__()
lowerCamelCase__ : Optional[int] = nn.Embedding(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : str = nn.SiLU()
lowerCamelCase__ : int = nn.Linear(lowerCamelCase_, embedding_dim * 2 )
lowerCamelCase__ : Tuple = nn.LayerNorm(lowerCamelCase_, elementwise_affine=lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = self.linear(self.silu(self.emb(lowerCamelCase_ ) ) )
lowerCamelCase__ : Optional[Any] = torch.chunk(lowerCamelCase_, 2 )
lowerCamelCase__ : Optional[int] = self.norm(lowerCamelCase_ ) * (1 + scale) + shift
return x
class a_ ( nn.Module ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
super().__init__()
lowerCamelCase__ : Tuple = CombinedTimestepLabelEmbeddings(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : List[Any] = nn.SiLU()
lowerCamelCase__ : List[Any] = nn.Linear(lowerCamelCase_, 6 * embedding_dim, bias=lowerCamelCase_ )
lowerCamelCase__ : int = nn.LayerNorm(lowerCamelCase_, elementwise_affine=lowerCamelCase_, eps=1e-6 )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=None ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.linear(self.silu(self.emb(lowerCamelCase_, lowerCamelCase_, hidden_dtype=lowerCamelCase_ ) ) )
lowerCamelCase__ : str = emb.chunk(6, dim=1 )
lowerCamelCase__ : List[str] = self.norm(lowerCamelCase_ ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class a_ ( nn.Module ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ = None, lowerCamelCase_ = 1e-5 ):
'''simple docstring'''
super().__init__()
lowerCamelCase__ : Optional[Any] = num_groups
lowerCamelCase__ : List[Any] = eps
if act_fn is None:
lowerCamelCase__ : int = None
else:
lowerCamelCase__ : Optional[int] = get_activation(lowerCamelCase_ )
lowerCamelCase__ : List[str] = nn.Linear(lowerCamelCase_, out_dim * 2 )
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
if self.act:
lowerCamelCase__ : List[Any] = self.act(lowerCamelCase_ )
lowerCamelCase__ : List[str] = self.linear(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = emb[:, :, None, None]
lowerCamelCase__ : List[str] = emb.chunk(2, dim=1 )
lowerCamelCase__ : int = F.group_norm(lowerCamelCase_, self.num_groups, eps=self.eps )
lowerCamelCase__ : Optional[int] = x * (1 + scale) + shift
return x
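# Numerical note on ApproximateGELU above (a hedged, self-contained sketch, not part
# of this module): x * sigmoid(1.702 * x) tracks the exact GELU to within roughly 1e-2:
# x = torch.linspace(-3.0, 3.0, 101)
# err = (x * torch.sigmoid(1.702 * x) - F.gelu(x)).abs().max()  # on the order of 1e-2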
| 368
|
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=3_0, lowerCamelCase_=2, lowerCamelCase_=3, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=3_2, lowerCamelCase_=2, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=1_0, lowerCamelCase_=0.02, lowerCamelCase_=3, lowerCamelCase_=None, lowerCamelCase_=2, ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = parent
lowerCamelCase__ : int = batch_size
lowerCamelCase__ : Dict = image_size
lowerCamelCase__ : List[str] = patch_size
lowerCamelCase__ : Union[str, Any] = num_channels
lowerCamelCase__ : str = is_training
lowerCamelCase__ : Any = use_labels
lowerCamelCase__ : Tuple = hidden_size
lowerCamelCase__ : str = num_hidden_layers
lowerCamelCase__ : Dict = num_attention_heads
lowerCamelCase__ : Union[str, Any] = intermediate_size
lowerCamelCase__ : Any = hidden_act
lowerCamelCase__ : Dict = hidden_dropout_prob
lowerCamelCase__ : Optional[Any] = attention_probs_dropout_prob
lowerCamelCase__ : List[Any] = type_sequence_label_size
lowerCamelCase__ : Optional[int] = initializer_range
lowerCamelCase__ : Tuple = scope
lowerCamelCase__ : List[str] = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
lowerCamelCase__ : str = (image_size // patch_size) ** 2
lowerCamelCase__ : Optional[int] = num_patches + 2
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFDeiTModel,
'image-classification': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
def a__ (self ):
'''simple docstring'''
        self.model_tester = TFDeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='DeiT does not use inputs_embeds' )
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[int] = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer) )
lowerCamelCase__ : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_, tf.keras.layers.Dense ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Dict = model_class(lowerCamelCase_ )
lowerCamelCase__ : Any = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : str = [*signature.parameters.keys()]
lowerCamelCase__ : Union[str, Any] = ['pixel_values']
self.assertListEqual(arg_names[:1], lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=False ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = super()._prepare_for_class(lowerCamelCase_, lowerCamelCase_, return_labels=lowerCamelCase_ )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def a__ (self ):
'''simple docstring'''
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : int = TFDeiTModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
return image
@require_tf
@require_vision
class a_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def a__ (self ):
'''simple docstring'''
return (
DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' )
if is_vision_available()
else None
)
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = TFDeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' )
lowerCamelCase__ : List[Any] = self.default_image_processor
lowerCamelCase__ : Union[str, Any] = prepare_img()
lowerCamelCase__ : Optional[int] = image_processor(images=lowerCamelCase_, return_tensors='tf' )
# forward pass
lowerCamelCase__ : Tuple = model(**lowerCamelCase_ )
# verify the logits
lowerCamelCase__ : str = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape, lowerCamelCase_ )
lowerCamelCase__ : Any = tf.constant([-1.0_266, 0.1_912, -1.2_861] )
self.assertTrue(np.allclose(outputs.logits[0, :3], lowerCamelCase_, atol=1e-4 ) )
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI, i.e. the runs triggered by the `schedule` event."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"
    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"
    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the workflow run id of the last completed scheduled (daily) CI run."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break
    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the requested artifacts of the last completed scheduled (daily) CI run."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # `worflow_run_id` (sic) matches the keyword spelling of the imported `get_artifacts_links` helper.
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Get the contents of the requested artifacts of the last completed scheduled (daily) CI run."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")
    return results
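# A minimal usage sketch (the artifact name and the GITHUB_TOKEN environment
# variable are assumptions, not mandated by this module):
#
#   import os
#   reports = get_last_daily_ci_reports(
#       artifact_names=["ci_results"],
#       output_dir="ci_artifacts",
#       token=os.environ.get("GITHUB_TOKEN"),
#   )
#   # -> {"ci_results": {"<file inside the zip>": "<decoded content>", ...}}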
"""simple docstring"""
def add(first: int, second: int) -> int:
    """Add two non-negative integers using only bitwise operations.

    >>> add(3, 5)
    8
    >>> add(13, 5)
    18
    """
    while second != 0:
        # the carry holds the common set bits, shifted one position to the left
        carry = first & second
        first ^= second
        second = carry << 1
    return first


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    print(f"{add(first, second) = }")
"""simple docstring"""
def lucas_lehmer_test(p: int) -> bool:
    """Return True if the Mersenne number 2**p - 1 is prime (p must be 2 or an odd prime).

    >>> lucas_lehmer_test(7)
    True
    >>> lucas_lehmer_test(11)
    False
    """
    if p < 2:
        raise ValueError('p should not be less than 2!')
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0


if __name__ == "__main__":
    print(lucas_lehmer_test(7))
    print(lucas_lehmer_test(11))
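# Usage sketch: these exponents are all known Mersenne-prime exponents, so each
# call should return True (2**p - 1 is prime for p in 3, 5, 7, 13, 17, 19):
# assert all(lucas_lehmer_test(p) for p in (3, 5, 7, 13, 17, 19))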
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
A_ : List[str] = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Dict = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from functools import reduce
N = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in the 1000-digit number ``n``."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
if __name__ == "__main__":
print(f"{solution() = }")
"""simple docstring"""
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    """Implement the hyperbolic tangent activation, (2 / (1 + e^(-2x))) - 1.

    >>> bool(tangent_hyperbolic(np.array([0.0]))[0] == 0.0)
    True
    """
    return (2 / (1 + np.exp(-2 * vector))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
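# Sanity-check sketch: (2 / (1 + e^(-2x))) - 1 is algebraically identical to
# tanh(x), so the result should match np.tanh to floating-point precision:
# v = np.array([-2.0, -0.5, 0.0, 0.5, 2.0])
# assert np.allclose(tangent_hyperbolic(v), np.tanh(v))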
"""simple docstring"""
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=9_9, lowerCamelCase_=1_3, lowerCamelCase_=1_6, lowerCamelCase_=7, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=False, lowerCamelCase_=True, lowerCamelCase_=2, lowerCamelCase_=3_2, lowerCamelCase_=4, lowerCamelCase_=4, lowerCamelCase_=3_0, lowerCamelCase_=0, lowerCamelCase_=1, lowerCamelCase_=2, lowerCamelCase_=None, ):
'''simple docstring'''
lowerCamelCase__ : List[str] = parent
lowerCamelCase__ : List[Any] = batch_size
lowerCamelCase__ : Dict = decoder_seq_length
# For common tests
lowerCamelCase__ : int = self.decoder_seq_length
lowerCamelCase__ : str = is_training
lowerCamelCase__ : Optional[Any] = use_attention_mask
lowerCamelCase__ : Optional[Any] = use_labels
lowerCamelCase__ : int = vocab_size
lowerCamelCase__ : List[Any] = d_model
lowerCamelCase__ : Union[str, Any] = d_model
lowerCamelCase__ : Union[str, Any] = decoder_layers
lowerCamelCase__ : Optional[int] = decoder_layers
lowerCamelCase__ : List[Any] = decoder_ffn_dim
lowerCamelCase__ : int = decoder_attention_heads
lowerCamelCase__ : List[str] = decoder_attention_heads
lowerCamelCase__ : str = eos_token_id
lowerCamelCase__ : int = bos_token_id
lowerCamelCase__ : List[Any] = pad_token_id
lowerCamelCase__ : Any = decoder_start_token_id
lowerCamelCase__ : Tuple = use_cache
lowerCamelCase__ : int = max_position_embeddings
lowerCamelCase__ : Dict = None
lowerCamelCase__ : Any = decoder_seq_length
lowerCamelCase__ : List[Any] = 2
lowerCamelCase__ : int = 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size, d_model=self.d_model, decoder_layers=self.decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, decoder_attention_heads=self.decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, max_position_embeddings=self.max_position_embeddings, )

        return (config, input_ids, attention_mask, lm_labels)
    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs['past_key_values']

        # create a hypothetical next token and extend input_ids to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append the new token to input_ids
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)['last_hidden_state']
        output_from_past = model(next_tokens, past_key_values=past_key_values)['last_hidden_state']

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
@require_torch
class a_ ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {'text-generation': TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False
def a__ (self ):
'''simple docstring'''
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
return
@unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :)
def a__ (self ):
'''simple docstring'''
pass
"""simple docstring"""
print((lambda quine: quine % quine)("print((lambda quine: quine %% quine)(%r))"))
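# How the one-liner reproduces itself: the template string contains %r, and
# `quine % quine` substitutes the string's own repr() into that slot, while %%
# collapses to a literal %. The printed text is therefore the source line again
# (up to the quote characters chosen by repr()).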
"""simple docstring"""
def get_set_bits_count(number: int) -> int:
    """Count the set bits (1s) in the binary representation of a non-negative integer.

    >>> get_set_bits_count(25)
    3
    >>> get_set_bits_count(0)
    0
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be a 'int' type")
    if number < 0:
        raise ValueError('Input value must be a positive integer')
    return bin(number).count('1')
if __name__ == "__main__":
import doctest
doctest.testmod()
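# A hedged alternative sketch, Brian Kernighan's trick: n & (n - 1) clears the
# lowest set bit, so the loop runs once per set bit rather than once per binary
# digit (non-negative inputs only):
def get_set_bits_count_kernighan(number: int) -> int:
    count = 0
    while number:
        number &= number - 1  # drop the lowest set bit
        count += 1
    return count

# get_set_bits_count_kernighan(25) == 3, matching bin(25).count("1")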
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Union[str, Any] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : int = ["CLIPFeatureExtractor"]
A_ : Any = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''transformer.blocks.{i}.norm1.weight''', f'''vilt.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.norm1.bias''', f'''vilt.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''transformer.blocks.{i}.attn.proj.weight''', f'''vilt.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''transformer.blocks.{i}.attn.proj.bias''', f'''vilt.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''transformer.blocks.{i}.norm2.weight''', f'''vilt.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.norm2.bias''', f'''vilt.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(f'''transformer.blocks.{i}.mlp.fc1.weight''', f'''vilt.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc1.bias''', f'''vilt.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.weight''', f'''vilt.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.bias''', f'''vilt.encoder.layer.{i}.output.dense.bias''') )
# embeddings
rename_keys.extend(
[
# text embeddings
('text_embeddings.word_embeddings.weight', 'vilt.embeddings.text_embeddings.word_embeddings.weight'),
(
'text_embeddings.position_embeddings.weight',
'vilt.embeddings.text_embeddings.position_embeddings.weight',
),
('text_embeddings.position_ids', 'vilt.embeddings.text_embeddings.position_ids'),
(
'text_embeddings.token_type_embeddings.weight',
'vilt.embeddings.text_embeddings.token_type_embeddings.weight',
),
('text_embeddings.LayerNorm.weight', 'vilt.embeddings.text_embeddings.LayerNorm.weight'),
('text_embeddings.LayerNorm.bias', 'vilt.embeddings.text_embeddings.LayerNorm.bias'),
# patch embeddings
('transformer.cls_token', 'vilt.embeddings.cls_token'),
('transformer.patch_embed.proj.weight', 'vilt.embeddings.patch_embeddings.projection.weight'),
('transformer.patch_embed.proj.bias', 'vilt.embeddings.patch_embeddings.projection.bias'),
('transformer.pos_embed', 'vilt.embeddings.position_embeddings'),
# token type embeddings
('token_type_embeddings.weight', 'vilt.embeddings.token_type_embeddings.weight'),
] )
# final layernorm + pooler
rename_keys.extend(
[
('transformer.norm.weight', 'vilt.layernorm.weight'),
('transformer.norm.bias', 'vilt.layernorm.bias'),
('pooler.dense.weight', 'vilt.pooler.dense.weight'),
('pooler.dense.bias', 'vilt.pooler.dense.bias'),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('vqa_classifier.0.weight', 'classifier.0.weight'),
('vqa_classifier.0.bias', 'classifier.0.bias'),
('vqa_classifier.1.weight', 'classifier.1.weight'),
('vqa_classifier.1.bias', 'classifier.1.bias'),
('vqa_classifier.3.weight', 'classifier.3.weight'),
('vqa_classifier.3.bias', 'classifier.3.bias'),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('nlvr2_classifier.0.weight', 'classifier.0.weight'),
('nlvr2_classifier.0.bias', 'classifier.0.bias'),
('nlvr2_classifier.1.weight', 'classifier.1.weight'),
('nlvr2_classifier.1.bias', 'classifier.1.bias'),
('nlvr2_classifier.3.weight', 'classifier.3.weight'),
('nlvr2_classifier.3.bias', 'classifier.3.bias'),
] )
else:
pass
return rename_keys
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        prefix = 'vilt.'
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''transformer.blocks.{i}.attn.qkv.weight''')
        in_proj_bias = state_dict.pop(f'''transformer.blocks.{i}.attn.qkv.bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[prefix + f'''encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[prefix + f'''encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[prefix + f'''encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[prefix + f'''encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[prefix + f'''encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[prefix + f'''encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original ViLT weights to our ViLT structure.
    """
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = 'huggingface/label-files'
        filename = 'vqa2-id2label.json'
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: 'False', 1: 'True'}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError('Unknown model type')

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')['state_dict']
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ['itm_score.fc.weight', 'itm_score.fc.bias']
        for k in ignore_keys:
            state_dict.pop(k, None)

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    processor = ViltProcessor(image_processor, tokenizer)

    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg', stream=True).raw)
        image2 = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg', stream=True).raw)
        text = (
            'The left image contains twice the number of dogs as the right image, and at least two dogs in total are'
            ' standing.'
        )
        encoding_1 = processor(image1, text, return_tensors='pt')
        encoding_2 = processor(image2, text, return_tensors='pt')
        outputs = model(
            input_ids=encoding_1.input_ids, pixel_values=encoding_1.pixel_values, pixel_values_2=encoding_2.pixel_values, )
    else:
        image = Image.open(requests.get('http://images.cocodataset.org/val2017/000000039769.jpg', stream=True).raw)
        if mlm_model:
            text = 'a bunch of [MASK] laying on a [MASK].'
        else:
            text = 'How many cats are there?'
        encoding = processor(image, text, return_tensors='pt')
        outputs = model(**encoding)

    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)

        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'''Saving model and processor to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
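# Example invocation (a sketch: the script filename and the output directory
# are placeholders, not fixed by this file):
#
#   python convert_vilt_checkpoint.py \
#       --checkpoint_url https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt \
#       --pytorch_dump_folder_path ./vilt-mlm-itm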
"""simple docstring"""
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
'''simple docstring'''
    features: Optional[datasets.Features] = None
def _generate_iterable_examples(df, partition_order):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select('*', pyspark.sql.functions.spark_partition_id().alias('part_id'))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select('*').where(f'''part_id = {partition_id}''').drop('part_id')
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f'''{partition_id}_{row_id}''', row.asDict()
                row_id += 1

    return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
'''simple docstring'''
    def __init__(self, df, partition_order=None):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)
def __iter__(self ):
'''simple docstring'''
yield from self.generate_examples_fn()
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(lowerCamelCase_ )
return SparkExamplesIterable(self.df, partition_order=lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.split_shard_indices_by_worker(lowerCamelCase_, lowerCamelCase_ )
return SparkExamplesIterable(self.df, partition_order=lowerCamelCase_ )
@property
def a__ (self ):
'''simple docstring'''
return len(self.partition_order )
class a_ ( datasets.DatasetBuilder ):
'''simple docstring'''
    BUILDER_CONFIG_CLASS = SparkConfig
    def __init__(self, df, cache_dir=None, working_dir=None, **config_kwargs):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir, config_name=str(self.df.semanticHash()), **config_kwargs, )
def a__ (self ):
'''simple docstring'''
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, 'fs_test' + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, 'a')
            return [probe_file]
if self._spark.conf.get('spark.master', '' ).startswith('local' ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
if os.path.isfile(probe[0] ):
return
raise ValueError(
'When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir' )
def a__ (self ):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
import pyspark
def get_arrow_batch_size(lowerCamelCase_ ):
for batch in it:
yield pa.RecordBatch.from_pydict({'batch_bytes': [batch.nbytes]} )
lowerCamelCase__ : List[Any] = self.df.count()
lowerCamelCase__ : List[Any] = df_num_rows if df_num_rows <= 1_0_0 else 1_0_0
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
lowerCamelCase__ : List[Any] = (
self.df.limit(lowerCamelCase_ )
.repartition(1 )
.mapInArrow(lowerCamelCase_, 'batch_bytes: long' )
.agg(pyspark.sql.functions.sum('batch_bytes' ).alias('sample_bytes' ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
lowerCamelCase__ : Dict = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
lowerCamelCase__ : str = min(lowerCamelCase_, int(approx_total_size / max_shard_size ) )
lowerCamelCase__ : List[Any] = self.df.repartition(lowerCamelCase_ )
    def _prepare_split_single(self, fpath, file_format, max_shard_size):
import pyspark
        writer_class = ParquetWriter if file_format == 'parquet' else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == 'parquet'

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options
def write_arrow(lowerCamelCase_ ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
lowerCamelCase__ : Any = pyspark.TaskContext().taskAttemptId()
lowerCamelCase__ : str = next(lowerCamelCase_, lowerCamelCase_ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]], names=['task_id', 'num_examples', 'num_bytes'], )
lowerCamelCase__ : Tuple = 0
lowerCamelCase__ : Any = writer_class(
features=lowerCamelCase_, path=working_fpath.replace('SSSSS', f'''{shard_id:05d}''' ).replace('TTTTT', f'''{task_id:05d}''' ), writer_batch_size=lowerCamelCase_, storage_options=lowerCamelCase_, embed_local_files=lowerCamelCase_, )
lowerCamelCase__ : List[str] = pa.Table.from_batches([first_batch] )
writer.write_table(lowerCamelCase_ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
lowerCamelCase__ , lowerCamelCase__ : Tuple = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]], names=['task_id', 'num_examples', 'num_bytes'], )
shard_id += 1
lowerCamelCase__ : Dict = writer_class(
features=writer._features, path=working_fpath.replace('SSSSS', f'''{shard_id:05d}''' ).replace('TTTTT', f'''{task_id:05d}''' ), writer_batch_size=lowerCamelCase_, storage_options=lowerCamelCase_, embed_local_files=lowerCamelCase_, )
lowerCamelCase__ : Tuple = pa.Table.from_batches([batch] )
writer.write_table(lowerCamelCase_ )
if writer._num_bytes > 0:
lowerCamelCase__ , lowerCamelCase__ : Tuple = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]], names=['task_id', 'num_examples', 'num_bytes'], )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(lowerCamelCase_ ) ):
lowerCamelCase__ : Optional[int] = os.path.join(os.path.dirname(lowerCamelCase_ ), os.path.basename(lowerCamelCase_ ) )
shutil.move(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : List[str] = (
self.df.mapInArrow(lowerCamelCase_, 'task_id: long, num_examples: long, num_bytes: long' )
.groupBy('task_id' )
.agg(
pyspark.sql.functions.sum('num_examples' ).alias('total_num_examples' ), pyspark.sql.functions.sum('num_bytes' ).alias('total_num_bytes' ), pyspark.sql.functions.count('num_bytes' ).alias('num_shards' ), pyspark.sql.functions.collect_list('num_examples' ).alias('shard_lengths' ), )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def a__ (self, lowerCamelCase_, lowerCamelCase_ = "arrow", lowerCamelCase_ = None, lowerCamelCase_ = None, **lowerCamelCase_, ):
'''simple docstring'''
self._validate_cache_dir()
lowerCamelCase__ : Union[str, Any] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(lowerCamelCase_ )
        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = '-TTTTT-SSSSS-of-NNNNN'
        fname = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []
        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (num_examples, num_bytes, num_shards, shard_lengths) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)

        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes
# should rename everything at the end
logger.debug(f'''Renaming {total_shards} shards.''' )
if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths
            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs
# use the -SSSSS-of-NNNNN pattern
            def _rename_shard(task_id, shard_id, global_shard_id):
                rename(
                    fs,
                    fpath.replace('SSSSS', f'''{shard_id:05d}''').replace('TTTTT', f'''{task_id:05d}'''),
                    fpath.replace('TTTTT-SSSSS', f'''{global_shard_id:05d}''').replace('NNNNN', f'''{total_shards:05d}'''),
                )
            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
else:
# don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace('SSSSS', f'''{shard_id:05d}''').replace('TTTTT', f'''{task_id:05d}'''),
                fpath.replace(SUFFIX, ''),
            )
def a__ (self, lowerCamelCase_, ):
'''simple docstring'''
return SparkExamplesIterable(self.df )
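# A minimal usage sketch (assumes a local SparkSession; `Dataset.from_spark` is
# the public entry point that drives this builder):
#
#   from pyspark.sql import SparkSession
#   from datasets import Dataset
#   spark = SparkSession.builder.master("local[*]").getOrCreate()
#   df = spark.createDataFrame([{"text": "hello"}, {"text": "world"}])
#   ds = Dataset.from_spark(df)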
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
'''simple docstring'''
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = 'gelu'
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=7, lowerCamelCase_=True, lowerCamelCase_=False, lowerCamelCase_=9_9, lowerCamelCase_=3_2, lowerCamelCase_=2, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=4_0, lowerCamelCase_=2, lowerCamelCase_=1, lowerCamelCase_=0, ):
'''simple docstring'''
lowerCamelCase__ : Any = parent
lowerCamelCase__ : int = batch_size
lowerCamelCase__ : Optional[int] = seq_length
lowerCamelCase__ : Tuple = is_training
lowerCamelCase__ : Optional[int] = use_labels
lowerCamelCase__ : Optional[int] = vocab_size
lowerCamelCase__ : Tuple = hidden_size
lowerCamelCase__ : Tuple = num_hidden_layers
lowerCamelCase__ : Optional[Any] = num_attention_heads
lowerCamelCase__ : Any = intermediate_size
lowerCamelCase__ : List[str] = hidden_dropout_prob
lowerCamelCase__ : str = attention_probs_dropout_prob
lowerCamelCase__ : Tuple = max_position_embeddings
lowerCamelCase__ : List[str] = eos_token_id
lowerCamelCase__ : Dict = pad_token_id
lowerCamelCase__ : Optional[Any] = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict['input_ids']

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['attention_mask'][:1, :]
        head_mask = inputs_dict['head_mask']
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create a hypothetical next token and extend input_ids to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append the new tokens to input_ids and attention_mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
{
'conversational': TFPegasusForConditionalGeneration,
'feature-extraction': TFPegasusModel,
'summarization': TFPegasusForConditionalGeneration,
'text2text-generation': TFPegasusForConditionalGeneration,
'translation': TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
def a__ (self ):
'''simple docstring'''
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowerCamelCase_ )
@require_sentencepiece
@require_tokenizers
@require_tf
class a_ ( unittest.TestCase ):
'''simple docstring'''
    src_text = [
' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.',
' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ',
]
    expected_text = [
'California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'
' reduce the risk of wildfires.',
'N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = 'google/pegasus-xsum'
@cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)
@cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model
    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors='tf')
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True, )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words
@slow
def a__ (self ):
'''simple docstring'''
self._assert_generated_batch_equal_expected()
"""simple docstring"""
class PrefixSum:
    """Answer range-sum queries in O(1) after O(n) preprocessing."""

    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """Return the sum of array[start..end], both ends inclusive.

        >>> PrefixSum([1, 2, 3, 4]).get_sum(1, 3)
        9
        """
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """Return True if some contiguous subarray sums to target_sum.

        >>> PrefixSum([1, 2, 3, 4]).contains_sum(6)
        True
        >>> PrefixSum([1, 2, 3, 4]).contains_sum(8)
        False
        """
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
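# The same idea extends to 2D grids: with a precomputed (1-indexed) 2D prefix
# table P, a rectangle sum is recovered by inclusion-exclusion:
# sum(r1..r2, c1..c2) = P[r2][c2] - P[r1-1][c2] - P[r2][c1-1] + P[r1-1][c1-1]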
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Optional[Any] = len(_lowerCamelCase )
lowerCamelCase__ : List[Any] = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
# for each arr value, a sum of zero(0) can be formed by not taking any element
# hence True/1
for i in range(arr_len + 1 ):
lowerCamelCase__ : Tuple = True
# sum is not zero and set is empty then false
for i in range(1 , required_sum + 1 ):
lowerCamelCase__ : List[str] = False
for i in range(1 , arr_len + 1 ):
for j in range(1 , required_sum + 1 ):
if arr[i - 1] > j:
lowerCamelCase__ : List[str] = subset[i - 1][j]
if arr[i - 1] <= j:
lowerCamelCase__ : Any = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
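# Self-contained sketch of the bottom-up recurrence above: dp[i][j] is True
# when some subset of the first i values sums to j, and each cell either
# skips arr[i - 1] or takes it (when it fits). Numbers are illustrative.
def _subset_sum_demo():
    arr, target = [3, 34, 4, 12, 5, 2], 9
    dp = [[False] * (target + 1) for _ in range(len(arr) + 1)]
    for i in range(len(arr) + 1):
        dp[i][0] = True  # the empty subset always sums to 0
    for i in range(1, len(arr) + 1):
        for j in range(1, target + 1):
            dp[i][j] = dp[i - 1][j] or (arr[i - 1] <= j and dp[i - 1][j - arr[i - 1]])
    assert dp[len(arr)][target]  # 4 + 5 == 9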
if __name__ == "__main__":
import doctest
doctest.testmod()
| 354
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['image_processor', 'tokenizer']
lowerCamelCase__ : Optional[int] = 'CLIPImageProcessor'
lowerCamelCase__ : List[str] = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
def __init__(self, lowerCamelCase_=None, lowerCamelCase_=None, **lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.', lowerCamelCase_, )
lowerCamelCase__ : int = kwargs.pop('feature_extractor' )
lowerCamelCase__ : str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(lowerCamelCase_, lowerCamelCase_ )
def __call__(self, lowerCamelCase_=None, lowerCamelCase_=None, lowerCamelCase_=None, **lowerCamelCase_ ):
'''simple docstring'''
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
lowerCamelCase__ : Any = self.tokenizer(lowerCamelCase_, return_tensors=lowerCamelCase_, **lowerCamelCase_ )
if images is not None:
lowerCamelCase__ : List[Any] = self.image_processor(lowerCamelCase_, return_tensors=lowerCamelCase_, **lowerCamelCase_ )
if text is not None and images is not None:
lowerCamelCase__ : str = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCamelCase_ ), tensor_type=lowerCamelCase_ )
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase_, **lowerCamelCase_ )
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase_, **lowerCamelCase_ )
@property
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = self.tokenizer.model_input_names
lowerCamelCase__ : List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 316
| 0
|
"""simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : int = SwinConfig()
lowerCamelCase__ : List[str] = swin_name.split('_' )
lowerCamelCase__ : Tuple = name_split[1]
lowerCamelCase__ : List[str] = int(name_split[4] )
lowerCamelCase__ : List[Any] = int(name_split[3][-1] )
if model_size == "tiny":
lowerCamelCase__ : Union[str, Any] = 96
lowerCamelCase__ : Any = (2, 2, 6, 2)
lowerCamelCase__ : Union[str, Any] = (3, 6, 12, 24)
elif model_size == "small":
lowerCamelCase__ : List[Any] = 96
lowerCamelCase__ : List[Any] = (2, 2, 18, 2)
lowerCamelCase__ : Any = (3, 6, 12, 24)
elif model_size == "base":
lowerCamelCase__ : Union[str, Any] = 128
lowerCamelCase__ : List[Any] = (2, 2, 18, 2)
lowerCamelCase__ : Union[str, Any] = (4, 8, 16, 32)
else:
lowerCamelCase__ : int = 192
lowerCamelCase__ : List[str] = (2, 2, 18, 2)
lowerCamelCase__ : str = (6, 12, 24, 48)
if "in22k" in swin_name:
lowerCamelCase__ : Tuple = 2_1841
else:
lowerCamelCase__ : List[Any] = 1000
lowerCamelCase__ : Optional[Any] = 'huggingface/label-files'
lowerCamelCase__ : Dict = 'imagenet-1k-id2label.json'
lowerCamelCase__ : Tuple = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type='dataset' ) , 'r' ) )
lowerCamelCase__ : Dict = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
lowerCamelCase__ : Union[str, Any] = idalabel
lowerCamelCase__ : List[Any] = {v: k for k, v in idalabel.items()}
lowerCamelCase__ : List[str] = img_size
lowerCamelCase__ : Tuple = num_classes
lowerCamelCase__ : Optional[Any] = embed_dim
lowerCamelCase__ : List[Any] = depths
lowerCamelCase__ : Optional[int] = num_heads
lowerCamelCase__ : Optional[int] = window_size
return config
def lowerCamelCase_ ( _lowerCamelCase ):
if "patch_embed.proj" in name:
lowerCamelCase__ : str = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
lowerCamelCase__ : Any = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
lowerCamelCase__ : Any = 'encoder.' + name
if "attn.proj" in name:
lowerCamelCase__ : Optional[int] = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
lowerCamelCase__ : List[Any] = name.replace('attn' , 'attention.self' )
if "norm1" in name:
lowerCamelCase__ : Tuple = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
lowerCamelCase__ : str = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
lowerCamelCase__ : Union[str, Any] = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
lowerCamelCase__ : Optional[int] = name.replace('mlp.fc2' , 'output.dense' )
if name == "norm.weight":
lowerCamelCase__ : Any = 'layernorm.weight'
if name == "norm.bias":
lowerCamelCase__ : str = 'layernorm.bias'
if "head" in name:
lowerCamelCase__ : str = name.replace('head' , 'classifier' )
else:
lowerCamelCase__ : List[str] = 'swin.' + name
return name
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
for key in orig_state_dict.copy().keys():
lowerCamelCase__ : int = orig_state_dict.pop(_lowerCamelCase )
if "mask" in key:
continue
elif "qkv" in key:
lowerCamelCase__ : Optional[int] = key.split('.' )
lowerCamelCase__ : Any = int(key_split[1] )
lowerCamelCase__ : List[str] = int(key_split[3] )
lowerCamelCase__ : List[Any] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
lowerCamelCase__ : Tuple = val[:dim, :]
lowerCamelCase__ : int = val[
dim : dim * 2, :
]
lowerCamelCase__ : Optional[Any] = val[-dim:, :]
else:
lowerCamelCase__ : List[Any] = val[
:dim
]
lowerCamelCase__ : List[Any] = val[
dim : dim * 2
]
lowerCamelCase__ : Optional[Any] = val[
-dim:
]
else:
lowerCamelCase__ : Union[str, Any] = val
return orig_state_dict
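# Self-contained sketch of the qkv split performed above: timm stores the
# query/key/value projections as a single fused matrix of shape (3*dim, dim),
# which is sliced into three (dim, dim) blocks for the HF checkpoint.
def _qkv_split_demo():
    dim = 4
    fused = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    q, k, v = fused[:dim, :], fused[dim : dim * 2, :], fused[-dim:, :]
    assert q.shape == k.shape == v.shape == (dim, dim)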
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Optional[int] = timm.create_model(_lowerCamelCase , pretrained=_lowerCamelCase )
timm_model.eval()
lowerCamelCase__ : int = get_swin_config(_lowerCamelCase )
lowerCamelCase__ : Optional[Any] = SwinForImageClassification(_lowerCamelCase )
model.eval()
lowerCamelCase__ : Tuple = convert_state_dict(timm_model.state_dict() , _lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
lowerCamelCase__ : List[Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowerCamelCase__ : Tuple = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_' , '-' ) ) )
lowerCamelCase__ : Any = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
lowerCamelCase__ : List[str] = image_processor(images=_lowerCamelCase , return_tensors='pt' )
lowerCamelCase__ : Any = timm_model(inputs['pixel_values'] )
lowerCamelCase__ : Optional[Any] = model(**_lowerCamelCase ).logits
assert torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-3 )
print(f'''Saving model {swin_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCamelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
A_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swin_name",
default="swin_tiny_patch4_window7_224",
type=str,
help="Name of the Swin timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
A_ : List[str] = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 355
|
"""simple docstring"""
import cva
import numpy as np
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
if k in (0.04, 0.06):
lowerCamelCase__ : Tuple = k
lowerCamelCase__ : Optional[Any] = window_size
else:
raise ValueError('invalid k value' )
def __str__(self ):
'''simple docstring'''
return str(self.k )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = cva.imread(lowerCamelCase_, 0 )
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = img.shape
lowerCamelCase__ : list[list[int]] = []
lowerCamelCase__ : Optional[Any] = img.copy()
lowerCamelCase__ : Optional[Any] = cva.cvtColor(lowerCamelCase_, cva.COLOR_GRAY2RGB )
lowerCamelCase__ , lowerCamelCase__ : Any = np.gradient(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = dx**2
lowerCamelCase__ : List[Any] = dy**2
lowerCamelCase__ : List[str] = dx * dy
        lowerCamelCase__ : Tuple = self.k  # use the k configured in __init__ instead of a hardcoded 0.04
lowerCamelCase__ : List[Any] = self.window_size // 2
for y in range(lowerCamelCase_, h - offset ):
for x in range(lowerCamelCase_, w - offset ):
lowerCamelCase__ : Union[str, Any] = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase__ : Optional[Any] = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase__ : List[Any] = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase__ : str = (wxx * wyy) - (wxy**2)
lowerCamelCase__ : Dict = wxx + wyy
lowerCamelCase__ : Union[str, Any] = det - k * (trace**2)
                # corner-response threshold; 0.5 here, tune per image
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0), 0 )
color_img.itemset((y, x, 1), 0 )
color_img.itemset((y, x, 2), 2_5_5 )
return color_img, corner_list
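# Hedged, OpenCV-free sketch of the Harris response computed above:
# R = det(M) - k * trace(M)^2, where M aggregates the windowed gradient
# products (Ixx, Iyy, Ixy). Inputs below are synthetic, purely illustrative.
def _harris_response(wxx: float, wyy: float, wxy: float, k: float = 0.04) -> float:
    det = (wxx * wyy) - (wxy**2)
    trace = wxx + wyy
    return det - k * (trace**2)
assert _harris_response(2.0, 2.0, 0.0) == 4.0 - 0.04 * 16.0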
if __name__ == "__main__":
A_ : Optional[Any] = HarrisCorner(0.04, 3)
A_, A_ : List[Any] = edge_detect.detect("path_to_image")
cva.imwrite("detect.png", color_img)
| 316
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Union[str, Any] = logging.get_logger(__name__)
A_ : Optional[int] = {
"transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Any = 'transfo-xl'
lowerCamelCase__ : List[str] = ['mems']
lowerCamelCase__ : Dict = {
'n_token': 'vocab_size',
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__(self, lowerCamelCase_=2_6_7_7_3_5, lowerCamelCase_=[2_0_0_0_0, 4_0_0_0_0, 2_0_0_0_0_0], lowerCamelCase_=1_0_2_4, lowerCamelCase_=1_0_2_4, lowerCamelCase_=1_6, lowerCamelCase_=6_4, lowerCamelCase_=4_0_9_6, lowerCamelCase_=4, lowerCamelCase_=False, lowerCamelCase_=1_8, lowerCamelCase_=1_6_0_0, lowerCamelCase_=1_0_0_0, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=0, lowerCamelCase_=-1, lowerCamelCase_=True, lowerCamelCase_=0.1, lowerCamelCase_=0.0, lowerCamelCase_=True, lowerCamelCase_="normal", lowerCamelCase_=0.01, lowerCamelCase_=0.01, lowerCamelCase_=0.02, lowerCamelCase_=1e-5, lowerCamelCase_=0, **lowerCamelCase_, ):
'''simple docstring'''
lowerCamelCase__ : str = vocab_size
lowerCamelCase__ : int = []
self.cutoffs.extend(lowerCamelCase_ )
if proj_share_all_but_first:
lowerCamelCase__ : Optional[int] = [False] + [True] * len(self.cutoffs )
else:
lowerCamelCase__ : str = [False] + [False] * len(self.cutoffs )
lowerCamelCase__ : Dict = d_model
lowerCamelCase__ : int = d_embed
lowerCamelCase__ : Union[str, Any] = d_head
lowerCamelCase__ : List[str] = d_inner
lowerCamelCase__ : List[str] = div_val
lowerCamelCase__ : Union[str, Any] = pre_lnorm
lowerCamelCase__ : Optional[int] = n_layer
lowerCamelCase__ : Dict = n_head
lowerCamelCase__ : List[Any] = mem_len
lowerCamelCase__ : Tuple = same_length
lowerCamelCase__ : List[Any] = attn_type
lowerCamelCase__ : List[Any] = clamp_len
lowerCamelCase__ : Union[str, Any] = sample_softmax
lowerCamelCase__ : Union[str, Any] = adaptive
lowerCamelCase__ : Optional[int] = dropout
lowerCamelCase__ : List[str] = dropatt
lowerCamelCase__ : Optional[int] = untie_r
lowerCamelCase__ : List[str] = init
lowerCamelCase__ : List[str] = init_range
lowerCamelCase__ : int = proj_init_std
lowerCamelCase__ : List[str] = init_std
lowerCamelCase__ : int = layer_norm_epsilon
super().__init__(eos_token_id=lowerCamelCase_, **lowerCamelCase_ )
@property
def a__ (self ):
'''simple docstring'''
logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
return -1
@max_position_embeddings.setter
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
raise NotImplementedError(
f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
| 356
|
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
A_ : str = TypeVar("KEY")
A_ : List[Any] = TypeVar("VAL")
@dataclass(frozen=snake_case_ , slots=snake_case_ )
class a_ ( Generic[KEY, VAL] ):
'''simple docstring'''
lowerCamelCase__ : KEY
lowerCamelCase__ : VAL
class a_ ( _Item ):
'''simple docstring'''
def __init__(self ):
'''simple docstring'''
        super().__init__(None, None )
def __bool__(self ):
'''simple docstring'''
return False
A_ : List[Any] = _DeletedItem()
class a_ ( MutableMapping[KEY, VAL] ):
'''simple docstring'''
def __init__(self, lowerCamelCase_ = 8, lowerCamelCase_ = 0.75 ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = initial_block_size
lowerCamelCase__ : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
lowerCamelCase__ : List[Any] = capacity_factor
lowerCamelCase__ : Optional[int] = 0
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return hash(lowerCamelCase_ ) % len(self._buckets )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return (ind + 1) % len(self._buckets )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self._buckets[ind]
if not stored:
lowerCamelCase__ : Tuple = _Item(lowerCamelCase_, lowerCamelCase_ )
self._len += 1
return True
elif stored.key == key:
lowerCamelCase__ : Optional[int] = _Item(lowerCamelCase_, lowerCamelCase_ )
return True
else:
return False
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
if len(self._buckets ) <= self._initial_block_size:
return False
lowerCamelCase__ : Any = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = self._buckets
lowerCamelCase__ : Dict = [None] * new_size
lowerCamelCase__ : Tuple = 0
for item in old_buckets:
if item:
self._add_item(item.key, item.val )
def a__ (self ):
'''simple docstring'''
self._resize(len(self._buckets ) * 2 )
def a__ (self ):
'''simple docstring'''
self._resize(len(self._buckets ) // 2 )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self._get_bucket_index(lowerCamelCase_ )
for _ in range(len(self._buckets ) ):
yield ind
lowerCamelCase__ : Tuple = self._get_next_ind(lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
for ind in self._iterate_buckets(lowerCamelCase_ ):
if self._try_set(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
break
def __setitem__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
if self._is_full():
self._size_up()
self._add_item(lowerCamelCase_, lowerCamelCase_ )
def __delitem__(self, lowerCamelCase_ ):
'''simple docstring'''
for ind in self._iterate_buckets(lowerCamelCase_ ):
lowerCamelCase__ : List[str] = self._buckets[ind]
if item is None:
raise KeyError(lowerCamelCase_ )
if item is _deleted:
continue
if item.key == key:
lowerCamelCase__ : Optional[int] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__(self, lowerCamelCase_ ):
'''simple docstring'''
for ind in self._iterate_buckets(lowerCamelCase_ ):
lowerCamelCase__ : List[Any] = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(lowerCamelCase_ )
def __len__(self ):
'''simple docstring'''
return self._len
def __iter__(self ):
'''simple docstring'''
yield from (item.key for item in self._buckets if item)
def __repr__(self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = ' ,'.join(
f'''{item.key}: {item.val}''' for item in self._buckets if item )
return f'''HashMap({val_string})'''
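# Minimal sketch of the linear-probing scheme used above: a key's home bucket
# is hash(key) % capacity, and on a collision the probe advances one slot at
# a time (wrapping around) until a free bucket or the matching key is found.
def _probe_sequence(key, capacity, steps=4):
    ind = hash(key) % capacity
    seq = []
    for _ in range(steps):
        seq.append(ind)
        ind = (ind + 1) % capacity
    return seq
assert _probe_sequence(0, 8) == [0, 1, 2, 3]  # hash(0) == 0 in CPython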
| 316
| 0
|
A_ : Tuple = 2_56
# Modulus to hash a string
A_ : str = 1_00_00_03
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : List[str] = len(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = len(_lowerCamelCase )
if p_len > t_len:
return False
lowerCamelCase__ : Tuple = 0
lowerCamelCase__ : Dict = 0
lowerCamelCase__ : Union[str, Any] = 1
# Calculating the hash of pattern and substring of text
for i in range(_lowerCamelCase ):
lowerCamelCase__ : Optional[int] = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
lowerCamelCase__ : Dict = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
lowerCamelCase__ : List[Any] = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
lowerCamelCase__ : Optional[int] = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
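# Self-contained illustration of the O(1) rolling-hash update used above:
# drop the leading character, shift by the alphabet size, append the next
# character, all mod the modulus — so each window is hashed without rescanning.
def _rolling_hash_demo():
    alphabet_size, modulus = 256, 1_000_003
    text, window = 'abcd', 2
    h = 0
    for ch in text[:window]:
        h = (ord(ch) + h * alphabet_size) % modulus
    power = pow(alphabet_size, window - 1, modulus)
    for i in range(len(text) - window):
        h = ((h - ord(text[i]) * power) * alphabet_size + ord(text[i + window])) % modulus
    direct = 0
    for ch in text[-window:]:
        direct = (ord(ch) + direct * alphabet_size) % modulus
    assert h == direct  # rolled hash of 'cd' matches the direct hash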
def lowerCamelCase_ ( ):
lowerCamelCase__ : List[Any] = 'abc1abc12'
lowerCamelCase__ : Tuple = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
lowerCamelCase__ : Union[str, Any] = 'alskfjaldsk23adsfabcabc'
assert rabin_karp(_lowerCamelCase , _lowerCamelCase ) and not rabin_karp(_lowerCamelCase , _lowerCamelCase )
# Test 2)
lowerCamelCase__ : List[Any] = 'ABABX'
lowerCamelCase__ : Optional[int] = 'ABABZABABYABABX'
assert rabin_karp(_lowerCamelCase , _lowerCamelCase )
# Test 3)
lowerCamelCase__ : Any = 'AAAB'
lowerCamelCase__ : str = 'ABAAAAAB'
assert rabin_karp(_lowerCamelCase , _lowerCamelCase )
# Test 4)
lowerCamelCase__ : List[Any] = 'abcdabcy'
lowerCamelCase__ : List[Any] = 'abcxabcdabxabcdabcdabcy'
assert rabin_karp(_lowerCamelCase , _lowerCamelCase )
# Test 5)
lowerCamelCase__ : List[Any] = 'Lü'
lowerCamelCase__ : Union[str, Any] = 'Lüsai'
assert rabin_karp(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : List[Any] = 'Lue'
assert not rabin_karp(_lowerCamelCase , _lowerCamelCase )
print('Success.' )
if __name__ == "__main__":
test_rabin_karp()
| 357
|
"""simple docstring"""
def lowerCamelCase_ ( ):
lowerCamelCase__ : Optional[Any] = []
lowerCamelCase__ : List[Any] = 1
while len(_lowerCamelCase ) < 1e6:
constant.append(str(_lowerCamelCase ) )
i += 1
lowerCamelCase__ : str = ''.join(_lowerCamelCase )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[9_9999] )
* int(constant[99_9999] )
)
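# Quick self-check of the digit indexing above: the concatenation starts
# 1 2 3 4 5 6 7 8 9 1 0 1 1 1 2 ..., so d1 = 1, d10 = 1 and d12 = 1.
def _champernowne_demo():
    digits = ''.join(str(i) for i in range(1, 100))
    assert digits[0] == '1' and digits[9] == '1' and digits[11] == '1'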
if __name__ == "__main__":
print(solution())
| 316
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
A_ : Optional[int] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Tuple = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
A_ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 358
|
"""simple docstring"""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
A_ : Union[str, Any] = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
A_ : int = concatenate_datasets
A_ : Any = DownloadConfig
A_ : List[Any] = DownloadManager
A_ : Optional[Any] = DownloadMode
A_ : List[str] = DownloadConfig
A_ : Optional[int] = DownloadMode
A_ : Dict = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 316
| 0
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
A_ : Dict = logging.get_logger(__name__)
A_ : List[str] = {
"Visual-Attention-Network/van-base": (
"https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
),
}
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = 'van'
def __init__(self, lowerCamelCase_=2_2_4, lowerCamelCase_=3, lowerCamelCase_=[7, 3, 3, 3], lowerCamelCase_=[4, 2, 2, 2], lowerCamelCase_=[6_4, 1_2_8, 3_2_0, 5_1_2], lowerCamelCase_=[3, 3, 1_2, 3], lowerCamelCase_=[8, 8, 4, 4], lowerCamelCase_="gelu", lowerCamelCase_=0.02, lowerCamelCase_=1e-6, lowerCamelCase_=1e-2, lowerCamelCase_=0.0, lowerCamelCase_=0.0, **lowerCamelCase_, ):
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = image_size
lowerCamelCase__ : Optional[Any] = num_channels
lowerCamelCase__ : List[str] = patch_sizes
lowerCamelCase__ : int = strides
lowerCamelCase__ : Optional[int] = hidden_sizes
lowerCamelCase__ : Optional[Any] = depths
lowerCamelCase__ : List[str] = mlp_ratios
lowerCamelCase__ : Any = hidden_act
lowerCamelCase__ : Optional[Any] = initializer_range
lowerCamelCase__ : List[Any] = layer_norm_eps
lowerCamelCase__ : Any = layer_scale_init_value
lowerCamelCase__ : Dict = drop_path_rate
lowerCamelCase__ : Union[str, Any] = dropout_rate
| 359
|
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
A_ : str = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : bool = field(default=snake_case_ , metadata={'help': 'Whether to use SortishSampler or not.'} )
lowerCamelCase__ : bool = field(
default=snake_case_ , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
lowerCamelCase__ : Optional[int] = field(
default=snake_case_ , metadata={
'help': (
'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `max_length` value of the model configuration.'
)
} , )
lowerCamelCase__ : Optional[int] = field(
default=snake_case_ , metadata={
'help': (
'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `num_beams` value of the model configuration.'
)
} , )
lowerCamelCase__ : Optional[Union[str, Path, GenerationConfig]] = field(
default=snake_case_ , metadata={
'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
} , )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = super().to_dict()
for k, v in d.items():
if isinstance(lowerCamelCase_, lowerCamelCase_ ):
lowerCamelCase__ : Any = v.to_dict()
return d
| 316
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Dict = logging.get_logger(__name__)
A_ : str = {
"google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Any = 'switch_transformers'
lowerCamelCase__ : List[Any] = ['past_key_values']
lowerCamelCase__ : Optional[int] = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__(self, lowerCamelCase_=3_2_1_2_8, lowerCamelCase_=7_6_8, lowerCamelCase_=6_4, lowerCamelCase_=2_0_4_8, lowerCamelCase_=6_4, lowerCamelCase_=1_2, lowerCamelCase_=3, lowerCamelCase_=1_2, lowerCamelCase_=3, lowerCamelCase_=1_2, lowerCamelCase_=8, lowerCamelCase_=False, lowerCamelCase_=0.01, lowerCamelCase_="float32", lowerCamelCase_=False, lowerCamelCase_=3_2, lowerCamelCase_=1_2_8, lowerCamelCase_=0.1, lowerCamelCase_=1e-6, lowerCamelCase_=0.001, lowerCamelCase_=0.001, lowerCamelCase_=1.0, lowerCamelCase_="relu", lowerCamelCase_=True, lowerCamelCase_=False, lowerCamelCase_=True, lowerCamelCase_=0, lowerCamelCase_=1, **lowerCamelCase_, ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = vocab_size
lowerCamelCase__ : Tuple = d_model
lowerCamelCase__ : Union[str, Any] = d_kv
lowerCamelCase__ : Any = d_ff
lowerCamelCase__ : Optional[Any] = num_sparse_encoder_layers
lowerCamelCase__ : Union[str, Any] = num_layers
lowerCamelCase__ : Optional[int] = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
lowerCamelCase__ : List[Any] = num_sparse_decoder_layers
        # This tells us how often (i.e. every how many layers) an encoder layer is sparse.
if self.num_sparse_encoder_layers > 0:
lowerCamelCase__ : str = self.num_layers // self.num_sparse_encoder_layers
else:
lowerCamelCase__ : Optional[Any] = self.num_layers # HACK: this will create 0 sparse layers
        # This tells us how often (i.e. every how many layers) a decoder layer is sparse.
if self.num_sparse_decoder_layers > 0:
lowerCamelCase__ : Optional[int] = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
lowerCamelCase__ : int = self.num_decoder_layers # HACK: this will create 0 sparse layers
lowerCamelCase__ : Optional[Any] = num_heads
lowerCamelCase__ : int = num_experts
lowerCamelCase__ : int = expert_capacity
lowerCamelCase__ : str = router_bias
lowerCamelCase__ : List[Any] = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''' )
lowerCamelCase__ : Optional[Any] = router_dtype
lowerCamelCase__ : Optional[Any] = router_ignore_padding_tokens
lowerCamelCase__ : Optional[Any] = relative_attention_num_buckets
lowerCamelCase__ : int = relative_attention_max_distance
lowerCamelCase__ : Union[str, Any] = dropout_rate
lowerCamelCase__ : List[str] = layer_norm_epsilon
lowerCamelCase__ : List[str] = initializer_factor
lowerCamelCase__ : Union[str, Any] = feed_forward_proj
lowerCamelCase__ : Optional[Any] = use_cache
lowerCamelCase__ : Optional[int] = add_router_probs
lowerCamelCase__ : Dict = router_z_loss_coef
lowerCamelCase__ : Union[str, Any] = router_aux_loss_coef
lowerCamelCase__ : Optional[int] = self.feed_forward_proj.split('-' )
lowerCamelCase__ : Union[str, Any] = act_info[-1]
lowerCamelCase__ : int = act_info[0] == 'gated'
if len(lowerCamelCase_ ) > 1 and act_info[0] != "gated" or len(lowerCamelCase_ ) > 2:
raise ValueError(
f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
lowerCamelCase__ : Any = 'gelu_new'
super().__init__(
pad_token_id=lowerCamelCase_, eos_token_id=lowerCamelCase_, is_encoder_decoder=lowerCamelCase_, **lowerCamelCase_, )
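# Worked example of the sparsity bookkeeping above (illustrative numbers, not
# defaults): with num_layers = 12 and num_sparse_encoder_layers = 3, the step
# is 12 // 3 = 4, i.e. every 4th encoder layer is a sparse (MoE) layer.
def _sparse_step_demo():
    num_layers, num_sparse_layers = 12, 3
    step = num_layers // num_sparse_layers
    assert step == 4
    assert sum(1 for i in range(1, num_layers + 1) if i % step == 0) == num_sparse_layers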
| 360
|
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = []
lowerCamelCase__ : List[str] = []
lowerCamelCase__ : Tuple = {
'^': 3,
'*': 2,
'/': 2,
'%': 2,
'+': 1,
'-': 1,
} # Priority of each operator
lowerCamelCase__ : List[str] = len(_lowerCamelCase ) if (len(_lowerCamelCase ) > 7) else 7
# Print table header for output
print(
'Symbol'.center(8 ) , 'Stack'.center(_lowerCamelCase ) , 'Postfix'.center(_lowerCamelCase ) , sep=' | ' , )
print('-' * (print_width * 3 + 7) )
for x in infix:
if x.isalpha() or x.isdigit():
post_fix.append(_lowerCamelCase ) # if x is Alphabet / Digit, add it to Postfix
elif x == "(":
stack.append(_lowerCamelCase ) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
stack.pop()
else:
if len(_lowerCamelCase ) == 0:
stack.append(_lowerCamelCase ) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
                while len(_lowerCamelCase ) > 0 and stack[-1] != "(" and priority[x] <= priority[stack[-1]]:  # never pop past an open parenthesis
post_fix.append(stack.pop() ) # pop stack & add to Postfix
stack.append(_lowerCamelCase ) # push x to stack
print(
x.center(8 ) , (''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , (''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , sep=' | ' , ) # Output in tabular format
while len(_lowerCamelCase ) > 0: # while stack is not empty
post_fix.append(stack.pop() ) # pop stack & add to Postfix
print(
' '.center(8 ) , (''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , (''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , sep=' | ' , ) # Output in tabular format
return "".join(_lowerCamelCase ) # return Postfix as str
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = list(infix[::-1] ) # reverse the infix equation
for i in range(len(_lowerCamelCase ) ):
if infix[i] == "(":
lowerCamelCase__ : List[Any] = ')' # change "(" to ")"
elif infix[i] == ")":
lowerCamelCase__ : Tuple = '(' # change ")" to "("
return (infix_2_postfix(''.join(_lowerCamelCase ) ))[
::-1
] # call infix_2_postfix on Infix, return reverse of Postfix
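# Compact, self-contained shunting-yard sketch (without the tabular printing
# above), using the same precedence table; handy as a non-interactive check.
def _to_postfix_demo(infix: str) -> str:
    priority = {'^': 3, '*': 2, '/': 2, '%': 2, '+': 1, '-': 1}
    stack: list[str] = []
    out: list[str] = []
    for x in infix:
        if x.isalnum():
            out.append(x)
        elif x == '(':
            stack.append(x)
        elif x == ')':
            while stack[-1] != '(':
                out.append(stack.pop())
            stack.pop()
        else:
            while stack and stack[-1] != '(' and priority[x] <= priority[stack[-1]]:
                out.append(stack.pop())
            stack.append(x)
    while stack:
        out.append(stack.pop())
    return ''.join(out)
assert _to_postfix_demo('a+b*(c^d-e)') == 'abcd^e-*+'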
if __name__ == "__main__":
A_ : Tuple = input("\nEnter an Infix Equation = ") # Input an Infix equation
A_ : List[str] = "".join(Infix.split()) # Remove spaces from the input
print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
| 316
| 0
|
from __future__ import annotations
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if resistance < 0:
raise ValueError('Resistance cannot be negative' )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError('Exactly one argument must be 0' )
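# Usage sketch for the solver above: pass exactly one quantity as 0 and the
# missing one is recovered via Ohm's law, V = I * R (values illustrative).
def _ohms_law_demo():
    voltage, resistance = 10.0, 5.0
    current = voltage / resistance  # I = V / R
    assert current == 2.0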
if __name__ == "__main__":
import doctest
doctest.testmod()
| 361
|
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
A_ : Any = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 1_28,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class a_ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def a__ (cls ):
'''simple docstring'''
lowerCamelCase__ : Tuple = TOKEN
HfFolder.save_token(lowerCamelCase_ )
@classmethod
def a__ (cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token, repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='test-dynamic-config' )
except HTTPError:
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('test-config', use_auth_token=self._token )
lowerCamelCase__ : List[str] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase_, repo_id='test-config', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : List[Any] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('valid_org/test-config-org', use_auth_token=self._token )
lowerCamelCase__ : int = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCamelCase_, repo_id='valid_org/test-config-org', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : Tuple = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
CustomConfig.register_for_auto_class()
lowerCamelCase__ : str = CustomConfig(attribute=4_2 )
config.push_to_hub('test-dynamic-config', use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map, {'AutoConfig': 'custom_configuration.CustomConfig'} )
lowerCamelCase__ : List[str] = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''', trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__, 'CustomConfig' )
self.assertEqual(new_config.attribute, 4_2 )
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
lowerCamelCase__ : Union[str, Any] = c.n_embd + 1 # int
lowerCamelCase__ : Optional[Any] = c.resid_pdrop + 1.0 # float
lowerCamelCase__ : str = not c.scale_attn_weights # bool
lowerCamelCase__ : Any = c.summary_type + 'foo' # str
c.update_from_string(
f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(lowerCamelCase_, c.n_embd, 'mismatch for key: n_embd' )
self.assertEqual(lowerCamelCase_, c.resid_pdrop, 'mismatch for key: resid_pdrop' )
self.assertEqual(lowerCamelCase_, c.scale_attn_weights, 'mismatch for key: scale_attn_weights' )
self.assertEqual(lowerCamelCase_, c.summary_type, 'mismatch for key: summary_type' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = PretrainedConfig()
lowerCamelCase__ : Union[str, Any] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
lowerCamelCase_, ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
lowerCamelCase__ : str = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCamelCase_, lowerCamelCase_ )]
if len(lowerCamelCase_ ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
f''' {', '.join(lowerCamelCase_ )}.''' )
def a__ (self ):
'''simple docstring'''
with self.assertRaises(lowerCamelCase_ ):
# config is in subfolder, the following should not work without specifying the subfolder
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
lowerCamelCase__ : Any = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder', subfolder='bert' )
self.assertIsNotNone(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = mock.Mock()
lowerCamelCase__ : str = 5_0_0
lowerCamelCase__ : Union[str, Any] = {}
lowerCamelCase__ : Any = HTTPError
lowerCamelCase__ : str = {}
# Download this model to make sure it's in the cache.
lowerCamelCase__ : Dict = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request', return_value=lowerCamelCase_ ) as mock_head:
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
            # This checks that we did call the fake head request
mock_head.assert_called()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = AutoConfig.from_pretrained('bert-base-cased' )
lowerCamelCase__ : Optional[Any] = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Tuple = 2
json.dump(configuration.to_dict(), open(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
lowerCamelCase__ : List[str] = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
lowerCamelCase__ : Optional[Any] = ['config.42.0.0.json']
lowerCamelCase__ : List[Any] = 7_6_8
configuration.save_pretrained(lowerCamelCase_ )
shutil.move(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), os.path.join(lowerCamelCase_, 'config.42.0.0.json' ) )
lowerCamelCase__ : str = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 7_6_8 )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
lowerCamelCase__ : Dict = 'v4.0.0'
lowerCamelCase__ , lowerCamelCase__ : str = new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCamelCase_, return_unused_kwargs=lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(lowerCamelCase_, {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
lowerCamelCase__ : Optional[Any] = 'v3.0.0'
lowerCamelCase__ : Optional[int] = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(old_configuration.hidden_size, 7_6_8 )
| 316
| 0
|
"""simple docstring"""
A_ : dict[str, float] = {
"km/h": 1.0,
"m/s": 3.6,
"mph": 1.609344,
"knot": 1.852,
}
A_ : dict[str, float] = {
"km/h": 1.0,
"m/s": 0.277777778,
"mph": 0.621371192,
"knot": 0.539956803,
}
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
lowerCamelCase__ : List[Any] = (
f'''Incorrect \'from_type\' or \'to_type\' value: {unit_from!r}, {unit_to!r}\n'''
f'''Valid values are: {', '.join(_lowerCamelCase )}'''
)
raise ValueError(_lowerCamelCase )
return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3 )
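# Worked example of the two-step conversion above: normalise to km/h via
# speed_chart, then scale into the target unit via speed_chart_inverse.
# 100 mph -> 100 * 1.609344 km/h -> * 0.277777778 -> ~44.704 m/s.
assert round(100 * 1.609344 * 0.277777778, 3) == 44.704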
if __name__ == "__main__":
import doctest
doctest.testmod()
| 362
|
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : list[list[int]] = []
lowerCamelCase__ : list[int] = []
lowerCamelCase__ : List[str] = 0
lowerCamelCase__ : List[Any] = sum(_lowerCamelCase )
create_state_space_tree(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
return result
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
if sum(_lowerCamelCase ) > max_sum or (remaining_nums_sum + sum(_lowerCamelCase )) < max_sum:
return
if sum(_lowerCamelCase ) == max_sum:
result.append(_lowerCamelCase )
return
for index in range(_lowerCamelCase , len(_lowerCamelCase ) ):
create_state_space_tree(
_lowerCamelCase , _lowerCamelCase , index + 1 , [*path, nums[index]] , _lowerCamelCase , remaining_nums_sum - nums[index] , )
A_ : Optional[Any] = [3, 34, 4, 12, 5, 2]
A_ : List[str] = 9
A_ : List[Any] = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
| 316
| 0
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
A_ : int = logging.get_logger(__name__)
A_ : Union[str, Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
A_ : Optional[int] = {
"vocab_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
},
"merges_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
},
"tokenizer_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
},
}
A_ : Any = {
"gpt2": 10_24,
"gpt2-medium": 10_24,
"gpt2-large": 10_24,
"gpt2-xl": 10_24,
"distilgpt2": 10_24,
}
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : str = VOCAB_FILES_NAMES
lowerCamelCase__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ : Tuple = ['input_ids', 'attention_mask']
lowerCamelCase__ : Dict = GPTaTokenizer
def __init__(self, lowerCamelCase_=None, lowerCamelCase_=None, lowerCamelCase_=None, lowerCamelCase_="<|endoftext|>", lowerCamelCase_="<|endoftext|>", lowerCamelCase_="<|endoftext|>", lowerCamelCase_=False, **lowerCamelCase_, ):
'''simple docstring'''
super().__init__(
lowerCamelCase_, lowerCamelCase_, tokenizer_file=lowerCamelCase_, unk_token=lowerCamelCase_, bos_token=lowerCamelCase_, eos_token=lowerCamelCase_, add_prefix_space=lowerCamelCase_, **lowerCamelCase_, )
lowerCamelCase__ : Union[str, Any] = kwargs.pop('add_bos_token', lowerCamelCase_ )
lowerCamelCase__ : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space', lowerCamelCase_ ) != add_prefix_space:
lowerCamelCase__ : Any = getattr(lowerCamelCase_, pre_tok_state.pop('type' ) )
lowerCamelCase__ : Tuple = add_prefix_space
lowerCamelCase__ : int = pre_tok_class(**lowerCamelCase_ )
lowerCamelCase__ : int = add_prefix_space
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = kwargs.get('is_split_into_words', lowerCamelCase_ )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*lowerCamelCase_, **lowerCamelCase_ )
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = kwargs.get('is_split_into_words', lowerCamelCase_ )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*lowerCamelCase_, **lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_ = None ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self._tokenizer.model.save(lowerCamelCase_, name=lowerCamelCase_ )
return tuple(lowerCamelCase_ )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ ) + [self.eos_token_id] )
if len(lowerCamelCase_ ) > self.model_max_length:
lowerCamelCase__ : List[str] = input_ids[-self.model_max_length :]
return input_ids
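# Minimal sketch of the conversation flattening above: each turn's token ids
# are followed by an EOS id, and only the trailing model_max_length ids are
# kept (ids below are hypothetical, purely illustrative).
def _conversation_ids_demo():
    turns = [[1, 2], [3], [4, 5, 6]]
    eos_id, model_max_length = 0, 5
    ids = []
    for turn in turns:
        ids.extend(turn + [eos_id])
    if len(ids) > model_max_length:
        ids = ids[-model_max_length:]
    assert ids == [0, 4, 5, 6, 0]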
| 363
|
"""simple docstring"""
from __future__ import annotations
import queue
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = data
lowerCamelCase__ : Optional[int] = None
lowerCamelCase__ : List[Any] = None
def lowerCamelCase_ ( ):
print('\n********Press N to stop entering at any point of time********\n' )
lowerCamelCase__ : str = input('Enter the value of the root node: ' ).strip().lower()
lowerCamelCase__ : queue.Queue = queue.Queue()
lowerCamelCase__ : Optional[Any] = TreeNode(int(_lowerCamelCase ) )
q.put(_lowerCamelCase )
while not q.empty():
lowerCamelCase__ : List[Any] = q.get()
lowerCamelCase__ : str = f'''Enter the left node of {node_found.data}: '''
lowerCamelCase__ : Dict = input(_lowerCamelCase ).strip().lower() or 'n'
if check == "n":
return tree_node
lowerCamelCase__ : str = TreeNode(int(_lowerCamelCase ) )
lowerCamelCase__ : Dict = left_node
q.put(_lowerCamelCase )
lowerCamelCase__ : List[str] = f'''Enter the right node of {node_found.data}: '''
lowerCamelCase__ : List[str] = input(_lowerCamelCase ).strip().lower() or 'n'
if check == "n":
return tree_node
lowerCamelCase__ : Optional[int] = TreeNode(int(_lowerCamelCase ) )
lowerCamelCase__ : Any = right_node
q.put(_lowerCamelCase )
    raise RuntimeError('The queue should never empty before a tree is returned' )
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
print(node.data , end=',' )
pre_order(node.left )
pre_order(node.right )
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
in_order(node.left )
print(node.data , end=',' )
in_order(node.right )
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=',' )
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
lowerCamelCase__ : queue.Queue = queue.Queue()
q.put(_lowerCamelCase )
while not q.empty():
lowerCamelCase__ : Any = q.get()
print(node_dequeued.data , end=',' )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
lowerCamelCase__ : queue.Queue = queue.Queue()
q.put(_lowerCamelCase )
while not q.empty():
lowerCamelCase__ : List[Any] = []
while not q.empty():
lowerCamelCase__ : str = q.get()
print(node_dequeued.data , end=',' )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(_lowerCamelCase )
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
lowerCamelCase__ : list[TreeNode] = []
lowerCamelCase__ : int = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=',' )
stack.append(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = n.left
# end of while means current node doesn't have left child
lowerCamelCase__ : List[Any] = stack.pop()
# start to traverse its right child
lowerCamelCase__ : Optional[Any] = n.right
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
lowerCamelCase__ : list[TreeNode] = []
lowerCamelCase__ : int = node
while n or stack:
while n:
stack.append(_lowerCamelCase )
lowerCamelCase__ : List[str] = n.left
lowerCamelCase__ : Tuple = stack.pop()
print(n.data , end=',' )
lowerCamelCase__ : Union[str, Any] = n.right
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
lowerCamelCase__ , lowerCamelCase__ : Any = [], []
lowerCamelCase__ : int = node
stacka.append(_lowerCamelCase )
while stacka: # to find the reversed order of post order, store it in stack2
lowerCamelCase__ : List[str] = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(_lowerCamelCase )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end=',' )
def lowerCamelCase_ ( _lowerCamelCase = "" , _lowerCamelCase=50 , _lowerCamelCase="*" ):
if not s:
return "\n" + width * char
lowerCamelCase__ , lowerCamelCase__ : Dict = divmod(width - len(_lowerCamelCase ) - 2 , 2 )
return f'''{left * char} {s} {(left + extra) * char}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("Binary Tree Traversals"))
A_ : TreeNode = build_tree()
print(prompt("Pre Order Traversal"))
pre_order(node)
print(prompt() + "\n")
print(prompt("In Order Traversal"))
in_order(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal"))
post_order(node)
print(prompt() + "\n")
print(prompt("Level Order Traversal"))
level_order(node)
print(prompt() + "\n")
print(prompt("Actual Level Order Traversal"))
level_order_actual(node)
print("*" * 50 + "\n")
print(prompt("Pre Order Traversal - Iteration Version"))
pre_order_iter(node)
print(prompt() + "\n")
print(prompt("In Order Traversal - Iteration Version"))
in_order_iter(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal - Iteration Version"))
post_order_iter(node)
print(prompt())
| 316
| 0
|
"""simple docstring"""
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def lowerCamelCase_ ( _lowerCamelCase=None , _lowerCamelCase=None ):
return field(default_factory=lambda: default , metadata=_lowerCamelCase )
@dataclass
class a_ :
'''simple docstring'''
lowerCamelCase__ : str = field(
metadata={'help': 'The csv file to plot.'} , )
lowerCamelCase__ : bool = field(
default=snake_case_ , metadata={'help': 'Whether to plot along batch size or sequence length. Defaults to sequence length.'} , )
lowerCamelCase__ : bool = field(
default=snake_case_ , metadata={'help': 'Whether the csv file has time results or memory results. Defaults to memory results.'} , )
lowerCamelCase__ : bool = field(
default=snake_case_ , metadata={'help': 'Disable logarithmic scale when plotting'} , )
lowerCamelCase__ : bool = field(
default=snake_case_ , metadata={
'help': 'Whether the csv file has training results or inference results. Defaults to inference results.'
} , )
lowerCamelCase__ : Optional[str] = field(
default=snake_case_ , metadata={'help': 'Filename under which the plot will be saved. If unused no plot is saved.'} , )
lowerCamelCase__ : Optional[List[str]] = list_field(
default=snake_case_ , metadata={'help': 'List of model names that are used instead of the ones in the csv file.'} )
def lowerCamelCase_ ( _lowerCamelCase ):
try:
int(_lowerCamelCase )
return True
except ValueError:
return False
def lowerCamelCase_ ( _lowerCamelCase ):
try:
float(_lowerCamelCase )
return True
except ValueError:
return False
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = args
lowerCamelCase__ : Optional[int] = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
with open(self.args.csv_file, newline='' ) as csv_file:
lowerCamelCase__ : Any = csv.DictReader(lowerCamelCase_ )
for row in reader:
lowerCamelCase__ : Any = row['model']
self.result_dict[model_name]["bsz"].append(int(row['batch_size'] ) )
self.result_dict[model_name]["seq_len"].append(int(row['sequence_length'] ) )
if can_convert_to_int(row['result'] ):
# value is not None
lowerCamelCase__ : Optional[Any] = int(row['result'] )
elif can_convert_to_float(row['result'] ):
# value is not None
lowerCamelCase__ : Tuple = float(row['result'] )
def a__ (self ):
'''simple docstring'''
        lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = plt.subplots()
lowerCamelCase__ : Dict = 'Time usage' if self.args.is_time else 'Memory usage'
lowerCamelCase__ : Optional[Any] = title_str + ' for training' if self.args.is_train else title_str + ' for inference'
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale('log' )
ax.set_yscale('log' )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
lowerCamelCase__ : Dict = sorted(set(self.result_dict[model_name]['bsz'] ) )
lowerCamelCase__ : Any = sorted(set(self.result_dict[model_name]['seq_len'] ) )
lowerCamelCase__ : int = self.result_dict[model_name]['result']
            lowerCamelCase__ , lowerCamelCase__ : Optional[int] = (
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
lowerCamelCase__ : int = (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
lowerCamelCase__ : Tuple = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results], dtype=int, )
else:
lowerCamelCase__ : Tuple = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results], dtype=np.float32, )
                lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = (
('batch_size', 'len') if self.args.plot_along_batch else ('in #tokens', 'bsz')
)
lowerCamelCase__ : int = np.asarray(lowerCamelCase_, lowerCamelCase_ )[: len(lowerCamelCase_ )]
plt.scatter(
lowerCamelCase_, lowerCamelCase_, label=f'''{label_model_name} - {inner_loop_label}: {inner_loop_value}''' )
plt.plot(lowerCamelCase_, lowerCamelCase_, '--' )
title_str += f''' {label_model_name} vs.'''
lowerCamelCase__ : Any = title_str[:-4]
lowerCamelCase__ : List[str] = 'Time in s' if self.args.is_time else 'Memory in MB'
# plot
plt.title(lowerCamelCase_ )
plt.xlabel(lowerCamelCase_ )
plt.ylabel(lowerCamelCase_ )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def lowerCamelCase_ ( ):
lowerCamelCase__ : Tuple = HfArgumentParser(_lowerCamelCase )
lowerCamelCase__ : Optional[Any] = parser.parse_args_into_dataclasses()[0]
lowerCamelCase__ : Dict = Plot(args=_lowerCamelCase )
plot.plot()
if __name__ == "__main__":
main()
| 364
|
"""simple docstring"""
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
A_ : Optional[Any] = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
A_ : List[Any] = {
# fairseq:
"wmt19-ru-en": {"length_penalty": 1.1},
"wmt19-en-ru": {"length_penalty": 1.15},
"wmt19-en-de": {"length_penalty": 1.0},
"wmt19-de-en": {"length_penalty": 1.1},
# allenai:
"wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
"wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
"wmt16-en-de-12-1": {"length_penalty": 0.8},
"wmt19-de-en-6-6-base": {"length_penalty": 0.6},
"wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
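# A minimal sketch (hypothetical downstream use) of how these defaults combine:
# generation_defaults = {"num_beams": 5, "early_stopping": False}
# generation_defaults.update(best_score_hparams.get("wmt19-ru-en", {}))
# -> {"num_beams": 5, "early_stopping": False, "length_penalty": 1.1}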
# this remaps the different models to their organization names
A_ : str = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
A_ : str = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
A_ : Optional[Any] = "allenai"
def lowerCamelCase_ ( _lowerCamelCase ):
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
lowerCamelCase__ : List[Any] = dict((re.sub(r'@@$' , '' , _lowerCamelCase ), v) if k.endswith('@@' ) else (re.sub(r'$' , '</w>' , _lowerCamelCase ), v) for k, v in d.items() )
lowerCamelCase__ : int = '<s> <pad> </s> <unk>'.split()
# restore the special tokens
for k in keep_keys:
del da[f'''{k}</w>''']
lowerCamelCase__ : List[str] = d[k] # restore
return da
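# Walking the example from the comment above through the whole function,
# special tokens included: {'<s>': 0, 'le@@': 5, 'er': 7}
# first becomes {'<s></w>': 0, 'le': 5, 'er</w>': 7}, and the keep_keys loop
# then restores '<s>': 0 -- only real word endings keep the '</w>' suffix.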
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
# prep
assert os.path.exists(_lowerCamelCase )
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
print(f'''Writing results to {pytorch_dump_folder_path}''' )
# handle various types of models
lowerCamelCase__ : Optional[int] = basename(_lowerCamelCase )
lowerCamelCase__ : str = dirname(_lowerCamelCase )
lowerCamelCase__ : Any = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
lowerCamelCase__ : int = cls.hub_models()
lowerCamelCase__ : str = {'bpe': 'fastbpe', 'tokenizer': 'moses'}
lowerCamelCase__ : Optional[Any] = '.'
    # note: the model dump is old and fairseq has since upgraded its model format;
    # loading performs a whole lot of rewrites and splits on the saved weights,
    # therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(f'''using checkpoint {checkpoint_file}''' )
lowerCamelCase__ : Any = hub_utils.from_pretrained(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , archive_map=_lowerCamelCase , **_lowerCamelCase )
lowerCamelCase__ : List[str] = vars(chkpt['args']['model'] )
lowerCamelCase__ : Optional[Any] = args['source_lang']
lowerCamelCase__ : List[str] = args['target_lang']
lowerCamelCase__ : List[str] = dirname(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = basename(_lowerCamelCase )
# dicts
lowerCamelCase__ : Optional[Any] = os.path.join(_lowerCamelCase , f'''dict.{src_lang}.txt''' )
lowerCamelCase__ : Optional[Any] = os.path.join(_lowerCamelCase , f'''dict.{tgt_lang}.txt''' )
lowerCamelCase__ : Dict = Dictionary.load(_lowerCamelCase )
lowerCamelCase__ : List[Any] = rewrite_dict_keys(src_dict.indices )
lowerCamelCase__ : int = len(_lowerCamelCase )
lowerCamelCase__ : List[Any] = os.path.join(_lowerCamelCase , 'vocab-src.json' )
print(f'''Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowerCamelCase , ensure_ascii=_lowerCamelCase , indent=_lowerCamelCase ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
lowerCamelCase__ : Optional[int] = True
for k in src_vocab.keys():
if not k.islower():
lowerCamelCase__ : int = False
break
lowerCamelCase__ : str = Dictionary.load(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = rewrite_dict_keys(tgt_dict.indices )
lowerCamelCase__ : Optional[Any] = len(_lowerCamelCase )
lowerCamelCase__ : Dict = os.path.join(_lowerCamelCase , 'vocab-tgt.json' )
print(f'''Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowerCamelCase , ensure_ascii=_lowerCamelCase , indent=_lowerCamelCase ) )
# merges_file (bpecodes)
lowerCamelCase__ : List[Any] = os.path.join(_lowerCamelCase , VOCAB_FILES_NAMES['merges_file'] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
lowerCamelCase__ : Optional[int] = os.path.join(_lowerCamelCase , _lowerCamelCase )
if os.path.exists(_lowerCamelCase ):
break
with open(_lowerCamelCase , encoding='utf-8' ) as fin:
lowerCamelCase__ : Union[str, Any] = fin.read()
lowerCamelCase__ : Any = re.sub(r' \d+$' , '' , _lowerCamelCase , 0 , re.M ) # remove frequency number
print(f'''Generating {merges_file}''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as fout:
fout.write(_lowerCamelCase )
# model config
lowerCamelCase__ : Dict = os.path.join(_lowerCamelCase , 'config.json' )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", f'''need to extend tokenizer to support bpe={args['bpe']}'''
assert args["tokenizer"] == "moses", f'''need to extend tokenizer to support bpe={args['tokenizer']}'''
lowerCamelCase__ : Optional[int] = {
'architectures': ['FSMTForConditionalGeneration'],
'model_type': 'fsmt',
'activation_dropout': args['activation_dropout'],
'activation_function': 'relu',
'attention_dropout': args['attention_dropout'],
'd_model': args['decoder_embed_dim'],
'dropout': args['dropout'],
'init_std': 0.02,
'max_position_embeddings': args['max_source_positions'],
'num_hidden_layers': args['encoder_layers'],
'src_vocab_size': src_vocab_size,
'tgt_vocab_size': tgt_vocab_size,
'langs': [src_lang, tgt_lang],
'encoder_attention_heads': args['encoder_attention_heads'],
'encoder_ffn_dim': args['encoder_ffn_embed_dim'],
'encoder_layerdrop': args['encoder_layerdrop'],
'encoder_layers': args['encoder_layers'],
'decoder_attention_heads': args['decoder_attention_heads'],
'decoder_ffn_dim': args['decoder_ffn_embed_dim'],
'decoder_layerdrop': args['decoder_layerdrop'],
'decoder_layers': args['decoder_layers'],
'bos_token_id': 0,
'pad_token_id': 1,
'eos_token_id': 2,
'is_encoder_decoder': True,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_all_embeddings'],
}
# good hparam defaults to start with
lowerCamelCase__ : str = 5
lowerCamelCase__ : Tuple = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
lowerCamelCase__ : List[str] = best_score_hparams[model_dir]['length_penalty']
else:
lowerCamelCase__ : List[Any] = 1.0
print(f'''Generating {fsmt_model_config_file}''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowerCamelCase , ensure_ascii=_lowerCamelCase , indent=_lowerCamelCase ) )
# tokenizer config
lowerCamelCase__ : Dict = os.path.join(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : int = {
'langs': [src_lang, tgt_lang],
'model_max_length': 1024,
'do_lower_case': do_lower_case,
}
print(f'''Generating {fsmt_tokenizer_config_file}''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowerCamelCase , ensure_ascii=_lowerCamelCase , indent=_lowerCamelCase ) )
# model
lowerCamelCase__ : List[str] = chkpt['models'][0]
lowerCamelCase__ : Optional[Any] = model.state_dict()
# rename keys to start with 'model.'
lowerCamelCase__ : str = OrderedDict(('model.' + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
lowerCamelCase__ : int = [
'model.model',
'model.encoder.version',
'model.decoder.version',
'model.encoder_embed_tokens.weight',
'model.decoder_embed_tokens.weight',
'model.encoder.embed_positions._float_tensor',
'model.decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
model_state_dict.pop(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : Any = FSMTConfig.from_pretrained(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = FSMTForConditionalGeneration(_lowerCamelCase )
# check that it loads ok
model_new.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
# save
lowerCamelCase__ : List[Any] = os.path.join(_lowerCamelCase , _lowerCamelCase )
print(f'''Generating {pytorch_weights_dump_path}''' )
torch.save(_lowerCamelCase , _lowerCamelCase )
print('Conversion is done!' )
print('\nLast step is to upload the files to s3' )
print(f'''cd {data_root}''' )
print(f'''transformers-cli upload {model_dir}''' )
if __name__ == "__main__":
A_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fsmt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
A_ : Dict = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 316
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : List[str] = logging.get_logger(__name__)
A_ : Union[str, Any] = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = 'markuplm'
def __init__(self, lowerCamelCase_=3_0_5_2_2, lowerCamelCase_=7_6_8, lowerCamelCase_=1_2, lowerCamelCase_=1_2, lowerCamelCase_=3_0_7_2, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=5_1_2, lowerCamelCase_=2, lowerCamelCase_=0.02, lowerCamelCase_=1e-12, lowerCamelCase_=0, lowerCamelCase_=0, lowerCamelCase_=2, lowerCamelCase_=2_5_6, lowerCamelCase_=1_0_2_4, lowerCamelCase_=2_1_6, lowerCamelCase_=1_0_0_1, lowerCamelCase_=3_2, lowerCamelCase_=5_0, lowerCamelCase_="absolute", lowerCamelCase_=True, lowerCamelCase_=None, **lowerCamelCase_, ):
'''simple docstring'''
super().__init__(
pad_token_id=lowerCamelCase_, bos_token_id=lowerCamelCase_, eos_token_id=lowerCamelCase_, **lowerCamelCase_, )
lowerCamelCase__ : Union[str, Any] = vocab_size
lowerCamelCase__ : Dict = hidden_size
lowerCamelCase__ : str = num_hidden_layers
lowerCamelCase__ : Union[str, Any] = num_attention_heads
lowerCamelCase__ : Union[str, Any] = hidden_act
lowerCamelCase__ : Union[str, Any] = intermediate_size
lowerCamelCase__ : Dict = hidden_dropout_prob
lowerCamelCase__ : str = attention_probs_dropout_prob
lowerCamelCase__ : Any = max_position_embeddings
lowerCamelCase__ : Any = type_vocab_size
lowerCamelCase__ : Dict = initializer_range
lowerCamelCase__ : int = layer_norm_eps
lowerCamelCase__ : Dict = position_embedding_type
lowerCamelCase__ : Optional[Any] = use_cache
lowerCamelCase__ : List[str] = classifier_dropout
# additional properties
lowerCamelCase__ : Dict = max_depth
lowerCamelCase__ : Any = max_xpath_tag_unit_embeddings
lowerCamelCase__ : Tuple = max_xpath_subs_unit_embeddings
lowerCamelCase__ : int = tag_pad_id
lowerCamelCase__ : Optional[Any] = subs_pad_id
lowerCamelCase__ : Union[str, Any] = xpath_unit_hidden_size
| 365
|
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class a_ ( snake_case_ ):
'''simple docstring'''
@staticmethod
@abstractmethod
def a__ (lowerCamelCase_ ):
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def a__ (self ):
'''simple docstring'''
raise NotImplementedError()
| 316
| 0
|
"""simple docstring"""
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : List[str] = cva.getAffineTransform(_lowerCamelCase , _lowerCamelCase )
return cva.warpAffine(_lowerCamelCase , _lowerCamelCase , (rows, cols) )
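# Sketch: cva.getAffineTransform solves for the 2x3 matrix that maps the three
# source points onto the three destination points, so passing the same points as
# source and destination (a hypothetical no-op case) yields the identity warp.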
if __name__ == "__main__":
# read original image
A_ : Union[str, Any] = cva.imread(
str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
)
# turn image in gray scale value
A_ : Dict = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
# get image shape
A_ : Any = gray_img.shape
# set different points to rotate image
    A_ : Union[str, Any] = np.array([[50, 50], [2_00, 50], [50, 2_00]], np.float32)
    A_ : Optional[Any] = np.array([[10, 1_00], [2_00, 50], [1_00, 2_50]], np.float32)
    A_ : List[Any] = np.array([[50, 50], [1_50, 50], [1_20, 2_00]], np.float32)
    A_ : str = np.array([[10, 1_00], [80, 50], [1_80, 2_50]], np.float32)
# add all rotated images in a list
A_ : str = [
gray_img,
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
]
# plot different image rotations
A_ : Optional[Any] = plt.figure(1)
A_ : Tuple = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
plt.title(titles[i])
plt.axis("off")
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
| 366
|
"""simple docstring"""
import re
def lowerCamelCase_ ( _lowerCamelCase ):
if len(re.findall('[ATCG]' , _lowerCamelCase ) ) != len(_lowerCamelCase ):
raise ValueError('Invalid Strand' )
return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) )
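# e.g. 'ATCG' -> 'TAGC' (A<->T, C<->G), while 'ATUG' raises ValueError
# because the regex above only accepts the bases A, T, C and G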
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316
| 0
|
"""simple docstring"""
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if not arr:
return None, None, 0
if low == high:
return low, high, arr[low]
lowerCamelCase__ : Union[str, Any] = (low + high) // 2
lowerCamelCase__ : Any = max_subarray(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : List[Any] = max_subarray(_lowerCamelCase , mid + 1 , _lowerCamelCase )
lowerCamelCase__ : int = max_cross_sum(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if left_sum >= right_sum and left_sum >= cross_sum:
return left_low, left_high, left_sum
elif right_sum >= left_sum and right_sum >= cross_sum:
return right_low, right_high, right_sum
return cross_left, cross_right, cross_sum
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
    lowerCamelCase__ , lowerCamelCase__ : str = float('-inf' ), -1
    lowerCamelCase__ , lowerCamelCase__ : List[str] = float('-inf' ), -1
lowerCamelCase__ : int | float = 0
for i in range(_lowerCamelCase , low - 1 , -1 ):
summ += arr[i]
if summ > left_sum:
lowerCamelCase__ : Any = summ
lowerCamelCase__ : List[str] = i
lowerCamelCase__ : str = 0
for i in range(mid + 1 , high + 1 ):
summ += arr[i]
if summ > right_sum:
lowerCamelCase__ : Tuple = summ
lowerCamelCase__ : Union[str, Any] = i
return max_left, max_right, (left_sum + right_sum)
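# Worked example (a small sanity check): for arr = [-2, 1, -3, 4],
# max_subarray(arr, 0, 3) returns (3, 3, 4) -- the best run is the single
# element 4, beating the crossing sum of 2 over indices 1..3.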
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Optional[int] = [randint(1 , _lowerCamelCase ) for _ in range(_lowerCamelCase )]
lowerCamelCase__ : str = time.time()
max_subarray(_lowerCamelCase , 0 , input_size - 1 )
lowerCamelCase__ : Union[str, Any] = time.time()
return end - start
def lowerCamelCase_ ( ):
lowerCamelCase__ : List[str] = [10, 100, 1000, 1_0000, 5_0000, 10_0000, 20_0000, 30_0000, 40_0000, 50_0000]
lowerCamelCase__ : Tuple = [time_max_subarray(_lowerCamelCase ) for input_size in input_sizes]
print('No of Inputs\t\tTime Taken' )
for input_size, runtime in zip(_lowerCamelCase , _lowerCamelCase ):
print(_lowerCamelCase , '\t\t' , _lowerCamelCase )
plt.plot(_lowerCamelCase , _lowerCamelCase )
plt.xlabel('Number of Inputs' )
plt.ylabel('Time taken in seconds' )
plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 367
|
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Any = s.rsplit(_lowerCamelCase , _lowerCamelCase )
return new.join(_lowerCamelCase )
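# e.g. rreplace('decoder.layers.0.w', '.w', '.weight', 1) -> 'decoder.layers.0.weight'
# (rsplit with maxsplit=1 means only the last occurrence is touched)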
def lowerCamelCase_ ( _lowerCamelCase ):
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() )
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : List[Any] = {}
lowerCamelCase__ : Any = ['group_1', 'group_2', 'group_3', 'group_4']
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
lowerCamelCase__ : Union[str, Any] = key.replace(f'''{group_key}.''' , f'''{group_key}.group.''' )
if "res_path" in key:
lowerCamelCase__ : Dict = key.replace('res_path.' , 'res_path.path.' )
if key.endswith('.w' ):
lowerCamelCase__ : str = rreplace(_lowerCamelCase , '.w' , '.weight' , 1 )
if key.endswith('.b' ):
lowerCamelCase__ : Optional[Any] = rreplace(_lowerCamelCase , '.b' , '.bias' , 1 )
lowerCamelCase__ : int = value.float()
return upgrade
@torch.no_grad()
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=True ):
from dall_e import Encoder
lowerCamelCase__ : List[str] = Encoder()
if os.path.exists(_lowerCamelCase ):
lowerCamelCase__ : Optional[int] = torch.load(_lowerCamelCase )
else:
lowerCamelCase__ : List[Any] = torch.hub.load_state_dict_from_url(_lowerCamelCase )
if isinstance(_lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : List[Any] = ckpt.state_dict()
encoder.load_state_dict(_lowerCamelCase )
if config_path is not None:
lowerCamelCase__ : Union[str, Any] = FlavaImageCodebookConfig.from_pretrained(_lowerCamelCase )
else:
lowerCamelCase__ : Dict = FlavaImageCodebookConfig()
lowerCamelCase__ : Tuple = FlavaImageCodebook(_lowerCamelCase ).eval()
lowerCamelCase__ : List[str] = encoder.state_dict()
lowerCamelCase__ : Any = upgrade_state_dict(_lowerCamelCase )
hf_model.load_state_dict(_lowerCamelCase )
lowerCamelCase__ : Optional[Any] = hf_model.state_dict()
lowerCamelCase__ : Optional[int] = count_parameters(_lowerCamelCase )
lowerCamelCase__ : Optional[int] = count_parameters(_lowerCamelCase )
assert torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-3 )
if save_checkpoint:
hf_model.save_pretrained(_lowerCamelCase )
else:
return hf_state_dict
if __name__ == "__main__":
A_ : Tuple = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
A_ : str = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 316
| 0
|
"""simple docstring"""
print((lambda quine: quine % quine)("print((lambda quine: quine %% quine)(%r))"))
| 368
|
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=3_0, lowerCamelCase_=2, lowerCamelCase_=3, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=3_2, lowerCamelCase_=2, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=1_0, lowerCamelCase_=0.02, lowerCamelCase_=3, lowerCamelCase_=None, lowerCamelCase_=2, ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = parent
lowerCamelCase__ : int = batch_size
lowerCamelCase__ : Dict = image_size
lowerCamelCase__ : List[str] = patch_size
lowerCamelCase__ : Union[str, Any] = num_channels
lowerCamelCase__ : str = is_training
lowerCamelCase__ : Any = use_labels
lowerCamelCase__ : Tuple = hidden_size
lowerCamelCase__ : str = num_hidden_layers
lowerCamelCase__ : Dict = num_attention_heads
lowerCamelCase__ : Union[str, Any] = intermediate_size
lowerCamelCase__ : Any = hidden_act
lowerCamelCase__ : Dict = hidden_dropout_prob
lowerCamelCase__ : Optional[Any] = attention_probs_dropout_prob
lowerCamelCase__ : List[Any] = type_sequence_label_size
lowerCamelCase__ : Optional[int] = initializer_range
lowerCamelCase__ : Tuple = scope
lowerCamelCase__ : List[str] = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
lowerCamelCase__ : str = (image_size // patch_size) ** 2
lowerCamelCase__ : Optional[int] = num_patches + 2
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ : Tuple = None
if self.use_labels:
lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : List[str] = self.get_config()
return config, pixel_values, labels
def a__ (self ):
'''simple docstring'''
return DeiTConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=lowerCamelCase_, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = TFDeiTModel(config=lowerCamelCase_ )
lowerCamelCase__ : Dict = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = TFDeiTForMaskedImageModeling(config=lowerCamelCase_ )
lowerCamelCase__ : Any = model(lowerCamelCase_ )
self.parent.assertEqual(
result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCamelCase__ : Tuple = 1
lowerCamelCase__ : Optional[Any] = TFDeiTForMaskedImageModeling(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_ )
self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : int = self.type_sequence_label_size
lowerCamelCase__ : Union[str, Any] = TFDeiTForImageClassification(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase__ : List[str] = 1
lowerCamelCase__ : Any = TFDeiTForImageClassification(lowerCamelCase_ )
lowerCamelCase__ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple = config_and_inputs
lowerCamelCase__ : str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Any = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
lowerCamelCase__ : Tuple = (
{
'feature-extraction': TFDeiTModel,
'image-classification': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
lowerCamelCase__ : Any = False
lowerCamelCase__ : Optional[Any] = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : int = False
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = TFDeiTModelTester(self )
lowerCamelCase__ : Union[str, Any] = ConfigTester(self, config_class=lowerCamelCase_, has_text_modality=lowerCamelCase_, hidden_size=3_7 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='DeiT does not use inputs_embeds' )
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[int] = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer) )
lowerCamelCase__ : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_, tf.keras.layers.Dense ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Dict = model_class(lowerCamelCase_ )
lowerCamelCase__ : Any = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : str = [*signature.parameters.keys()]
lowerCamelCase__ : Union[str, Any] = ['pixel_values']
self.assertListEqual(arg_names[:1], lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=False ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = super()._prepare_for_class(lowerCamelCase_, lowerCamelCase_, return_labels=lowerCamelCase_ )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def a__ (self ):
'''simple docstring'''
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : int = TFDeiTModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def lowerCamelCase_ ( ):
lowerCamelCase__ : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class a_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def a__ (self ):
'''simple docstring'''
return (
DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' )
if is_vision_available()
else None
)
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = TFDeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' )
lowerCamelCase__ : List[Any] = self.default_image_processor
lowerCamelCase__ : Union[str, Any] = prepare_img()
lowerCamelCase__ : Optional[int] = image_processor(images=lowerCamelCase_, return_tensors='tf' )
# forward pass
lowerCamelCase__ : Tuple = model(**lowerCamelCase_ )
# verify the logits
lowerCamelCase__ : str = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape, lowerCamelCase_ )
lowerCamelCase__ : Any = tf.constant([-1.0_266, 0.1_912, -1.2_861] )
self.assertTrue(np.allclose(outputs.logits[0, :3], lowerCamelCase_, atol=1e-4 ) )
| 316
| 0
|
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A_ : List[Any] = logging.get_logger(__name__)
A_ : Tuple = {"vocab_file": "spiece.model"}
A_ : Tuple = {
"vocab_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
}
}
A_ : Dict = {
"albert-base-v1": 5_12,
"albert-large-v1": 5_12,
"albert-xlarge-v1": 5_12,
"albert-xxlarge-v1": 5_12,
"albert-base-v2": 5_12,
"albert-large-v2": 5_12,
"albert-xlarge-v2": 5_12,
"albert-xxlarge-v2": 5_12,
}
A_ : Optional[Any] = "▁"
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : str = VOCAB_FILES_NAMES
lowerCamelCase__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__(self, lowerCamelCase_, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=False, lowerCamelCase_="[CLS]", lowerCamelCase_="[SEP]", lowerCamelCase_="<unk>", lowerCamelCase_="[SEP]", lowerCamelCase_="<pad>", lowerCamelCase_="[CLS]", lowerCamelCase_="[MASK]", lowerCamelCase_ = None, **lowerCamelCase_, ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = (
AddedToken(lowerCamelCase_, lstrip=lowerCamelCase_, rstrip=lowerCamelCase_, normalized=lowerCamelCase_ )
if isinstance(lowerCamelCase_, lowerCamelCase_ )
else mask_token
)
lowerCamelCase__ : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=lowerCamelCase_, remove_space=lowerCamelCase_, keep_accents=lowerCamelCase_, bos_token=lowerCamelCase_, eos_token=lowerCamelCase_, unk_token=lowerCamelCase_, sep_token=lowerCamelCase_, pad_token=lowerCamelCase_, cls_token=lowerCamelCase_, mask_token=lowerCamelCase_, sp_model_kwargs=self.sp_model_kwargs, **lowerCamelCase_, )
lowerCamelCase__ : Union[str, Any] = do_lower_case
lowerCamelCase__ : Optional[int] = remove_space
lowerCamelCase__ : Any = keep_accents
lowerCamelCase__ : List[str] = vocab_file
lowerCamelCase__ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCamelCase_ )
@property
def a__ (self ):
'''simple docstring'''
return len(self.sp_model )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = {self.convert_ids_to_tokens(lowerCamelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__(self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.__dict__.copy()
lowerCamelCase__ : Any = None
return state
def __setstate__(self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = d
# for backward compatibility
if not hasattr(self, 'sp_model_kwargs' ):
lowerCamelCase__ : Dict = {}
lowerCamelCase__ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
if self.remove_space:
lowerCamelCase__ : List[Any] = ' '.join(inputs.strip().split() )
else:
lowerCamelCase__ : int = inputs
lowerCamelCase__ : str = outputs.replace('``', '"' ).replace('\'\'', '"' )
if not self.keep_accents:
lowerCamelCase__ : Tuple = unicodedata.normalize('NFKD', lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = ''.join([c for c in outputs if not unicodedata.combining(lowerCamelCase_ )] )
if self.do_lower_case:
lowerCamelCase__ : Union[str, Any] = outputs.lower()
return outputs
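        # e.g. "  Héllo ``world''  " -> 'hello "world"' when do_lower_case and
        # remove_space are set and keep_accents is False (NFKD strips the accent)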
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.preprocess_text(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = self.sp_model.encode(lowerCamelCase_, out_type=lowerCamelCase_ )
lowerCamelCase__ : Any = []
for piece in pieces:
if len(lowerCamelCase_ ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
lowerCamelCase__ : Optional[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(lowerCamelCase_, '' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowerCamelCase__ : Union[str, Any] = cur_pieces[1:]
else:
lowerCamelCase__ : List[Any] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(lowerCamelCase_ )
else:
new_pieces.append(lowerCamelCase_ )
return new_pieces
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return self.sp_model.PieceToId(lowerCamelCase_ )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return self.sp_model.IdToPiece(lowerCamelCase_ )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = []
lowerCamelCase__ : List[str] = ''
lowerCamelCase__ : Any = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCamelCase_ ) + token
lowerCamelCase__ : List[Any] = True
lowerCamelCase__ : Optional[int] = []
else:
current_sub_tokens.append(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = False
out_string += self.sp_model.decode(lowerCamelCase_ )
return out_string.strip()
def a__ (self, lowerCamelCase_, lowerCamelCase_ = None ):
'''simple docstring'''
lowerCamelCase__ : str = [self.sep_token_id]
lowerCamelCase__ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def a__ (self, lowerCamelCase_, lowerCamelCase_ = None, lowerCamelCase_ = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_, token_ids_a=lowerCamelCase_, already_has_special_tokens=lowerCamelCase_ )
if token_ids_a is not None:
return [1] + ([0] * len(lowerCamelCase_ )) + [1] + ([0] * len(lowerCamelCase_ )) + [1]
return [1] + ([0] * len(lowerCamelCase_ )) + [1]
def a__ (self, lowerCamelCase_, lowerCamelCase_ = None ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = [self.sep_token_id]
lowerCamelCase__ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a__ (self, lowerCamelCase_, lowerCamelCase_ = None ):
'''simple docstring'''
if not os.path.isdir(lowerCamelCase_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase__ : Optional[int] = os.path.join(
lowerCamelCase_, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, lowerCamelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase_, 'wb' ) as fi:
lowerCamelCase__ : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase_ )
return (out_vocab_file,)
| 369
|
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
while second != 0:
lowerCamelCase__ : Tuple = first & second
first ^= second
lowerCamelCase__ : int = c << 1
return first
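# Trace for add(3, 5): carry=1, first=6, second=2; carry=2, first=4, second=4;
# carry=4, first=0, second=8; carry=0, first=8, second=0 -> returns 8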
if __name__ == "__main__":
import doctest
doctest.testmod()
A_ : Tuple = int(input("Enter the first number: ").strip())
A_ : Union[str, Any] = int(input("Enter the second number: ").strip())
print(f"{add(first, second) = }")
| 316
| 0
|
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
A_ : int = logging.get_logger(__name__)
A_ : Optional[Any] = {
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/config.json",
# See all BART models at https://huggingface.co/models?filter=bart
}
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = 'bart'
lowerCamelCase__ : int = ['past_key_values']
lowerCamelCase__ : Dict = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__(self, lowerCamelCase_=5_0_2_6_5, lowerCamelCase_=1_0_2_4, lowerCamelCase_=1_2, lowerCamelCase_=4_0_9_6, lowerCamelCase_=1_6, lowerCamelCase_=1_2, lowerCamelCase_=4_0_9_6, lowerCamelCase_=1_6, lowerCamelCase_=0.0, lowerCamelCase_=0.0, lowerCamelCase_="gelu", lowerCamelCase_=1_0_2_4, lowerCamelCase_=0.1, lowerCamelCase_=0.0, lowerCamelCase_=0.0, lowerCamelCase_=0.02, lowerCamelCase_=0.0, lowerCamelCase_=False, lowerCamelCase_=True, lowerCamelCase_=3, lowerCamelCase_=1, lowerCamelCase_=0, lowerCamelCase_=2, lowerCamelCase_=True, lowerCamelCase_=2, lowerCamelCase_=2, **lowerCamelCase_, ):
'''simple docstring'''
lowerCamelCase__ : str = vocab_size
lowerCamelCase__ : List[str] = max_position_embeddings
lowerCamelCase__ : Union[str, Any] = d_model
lowerCamelCase__ : Optional[int] = encoder_ffn_dim
lowerCamelCase__ : Optional[Any] = encoder_layers
lowerCamelCase__ : str = encoder_attention_heads
lowerCamelCase__ : str = decoder_ffn_dim
lowerCamelCase__ : Optional[int] = decoder_layers
lowerCamelCase__ : str = decoder_attention_heads
lowerCamelCase__ : Union[str, Any] = dropout
lowerCamelCase__ : List[Any] = attention_dropout
lowerCamelCase__ : str = activation_dropout
lowerCamelCase__ : List[str] = activation_function
lowerCamelCase__ : Any = init_std
lowerCamelCase__ : List[Any] = encoder_layerdrop
lowerCamelCase__ : List[Any] = decoder_layerdrop
lowerCamelCase__ : List[str] = classifier_dropout
lowerCamelCase__ : Optional[int] = use_cache
lowerCamelCase__ : Any = encoder_layers
lowerCamelCase__ : Dict = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=lowerCamelCase_, pad_token_id=lowerCamelCase_, bos_token_id=lowerCamelCase_, eos_token_id=lowerCamelCase_, is_encoder_decoder=lowerCamelCase_, decoder_start_token_id=lowerCamelCase_, forced_eos_token_id=lowerCamelCase_, **lowerCamelCase_, )
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated', lowerCamelCase_ ):
lowerCamelCase__ : str = self.bos_token_id
warnings.warn(
f'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
'The config can simply be saved and uploaded again to be fixed.' )
class a_ ( snake_case_ ):
'''simple docstring'''
@property
def a__ (self ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase__ : Optional[Any] = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
lowerCamelCase__ : Any = {0: 'batch'}
lowerCamelCase__ : Optional[Any] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
lowerCamelCase__ : str = {0: 'batch', 1: 'decoder_sequence'}
lowerCamelCase__ : Dict = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase_, direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
lowerCamelCase__ : str = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
lowerCamelCase__ : str = self.num_layers
for i in range(lowerCamelCase_ ):
lowerCamelCase__ : Tuple = {0: 'batch', 2: 'past_sequence + sequence'}
lowerCamelCase__ : int = {0: 'batch', 2: 'past_sequence + sequence'}
else:
lowerCamelCase__ : Tuple = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
def a__ (self ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase__ : Union[str, Any] = super().outputs
else:
lowerCamelCase__ : int = super(lowerCamelCase_, self ).outputs
if self.use_past:
lowerCamelCase__ : str = self.num_layers
for i in range(lowerCamelCase_ ):
lowerCamelCase__ : Tuple = {0: 'batch', 2: 'past_sequence + sequence'}
lowerCamelCase__ : List[Any] = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def a__ (self, lowerCamelCase_, lowerCamelCase_ = -1, lowerCamelCase_ = -1, lowerCamelCase_ = False, lowerCamelCase_ = None, ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
# Generate decoder inputs
lowerCamelCase__ : str = seq_length if not self.use_past else 1
lowerCamelCase__ : List[str] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Any = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
lowerCamelCase__ : List[Any] = dict(**lowerCamelCase_, **lowerCamelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
lowerCamelCase__ : Optional[int] = common_inputs['input_ids'].shape
lowerCamelCase__ : Union[str, Any] = common_inputs['decoder_input_ids'].shape[1]
lowerCamelCase__ : Any = self.num_attention_heads
lowerCamelCase__ : List[Any] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCamelCase__ : Dict = decoder_seq_length + 3
lowerCamelCase__ : List[str] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
lowerCamelCase__ : Any = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(lowerCamelCase_, lowerCamelCase_ )], dim=1 )
lowerCamelCase__ : Union[str, Any] = []
            # If both encoder and decoder layer counts are present in the model configuration, both are considered
lowerCamelCase__ : Any = self.num_layers
lowerCamelCase__ : Dict = min(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Tuple = max(lowerCamelCase_, lowerCamelCase_ ) - min_num_layers
lowerCamelCase__ : str = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(lowerCamelCase_ ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCamelCase_ ),
torch.zeros(lowerCamelCase_ ),
torch.zeros(lowerCamelCase_ ),
torch.zeros(lowerCamelCase_ ),
) )
# TODO: test this.
lowerCamelCase__ : List[str] = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(lowerCamelCase_, lowerCamelCase_ ):
common_inputs["past_key_values"].append((torch.zeros(lowerCamelCase_ ), torch.zeros(lowerCamelCase_ )) )
return common_inputs
def a__ (self, lowerCamelCase_, lowerCamelCase_ = -1, lowerCamelCase_ = -1, lowerCamelCase_ = False, lowerCamelCase_ = None, ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
lowerCamelCase__ : Optional[int] = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
lowerCamelCase__ : Any = seqlen + 2
lowerCamelCase__ : Any = self.num_layers
lowerCamelCase__ : Dict = self.num_attention_heads
lowerCamelCase__ : List[str] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCamelCase__ : List[Any] = common_inputs['attention_mask'].dtype
lowerCamelCase__ : List[str] = torch.cat(
[common_inputs['attention_mask'], torch.ones(lowerCamelCase_, lowerCamelCase_, dtype=lowerCamelCase_ )], dim=1 )
lowerCamelCase__ : Union[str, Any] = [
(torch.zeros(lowerCamelCase_ ), torch.zeros(lowerCamelCase_ )) for _ in range(lowerCamelCase_ )
]
return common_inputs
def a__ (self, lowerCamelCase_, lowerCamelCase_ = -1, lowerCamelCase_ = -1, lowerCamelCase_ = False, lowerCamelCase_ = None, ):
'''simple docstring'''
lowerCamelCase__ : Any = compute_effective_axis_dimension(
lowerCamelCase_, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowerCamelCase__ : Any = tokenizer.num_special_tokens_to_add(lowerCamelCase_ )
lowerCamelCase__ : str = compute_effective_axis_dimension(
lowerCamelCase_, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=lowerCamelCase_ )
# Generate dummy inputs according to compute batch and sequence
lowerCamelCase__ : Union[str, Any] = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
lowerCamelCase__ : Optional[Any] = dict(tokenizer(lowerCamelCase_, return_tensors=lowerCamelCase_ ) )
return common_inputs
def a__ (self, lowerCamelCase_, lowerCamelCase_ = -1, lowerCamelCase_ = -1, lowerCamelCase_ = False, lowerCamelCase_ = None, ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase__ : Tuple = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowerCamelCase_, batch_size=lowerCamelCase_, seq_length=lowerCamelCase_, is_pair=lowerCamelCase_, framework=lowerCamelCase_ )
elif self.task == "causal-lm":
lowerCamelCase__ : Tuple = self._generate_dummy_inputs_for_causal_lm(
lowerCamelCase_, batch_size=lowerCamelCase_, seq_length=lowerCamelCase_, is_pair=lowerCamelCase_, framework=lowerCamelCase_ )
else:
lowerCamelCase__ : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_, batch_size=lowerCamelCase_, seq_length=lowerCamelCase_, is_pair=lowerCamelCase_, framework=lowerCamelCase_ )
return common_inputs
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase__ : Any = super()._flatten_past_key_values_(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
else:
lowerCamelCase__ : Any = super(lowerCamelCase_, self )._flatten_past_key_values_(
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
| 370
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
A_ : List[str] = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Dict = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
A_ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 316
| 0
|
"""simple docstring"""
from __future__ import annotations
import time
import numpy as np
A_ : Tuple = [8, 5, 9, 7]
A_ : Any = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
A_ : List[Any] = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
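# A note on the tables above (standard Banker's algorithm bookkeeping): the claim
# vector holds the total units of each resource type, and each process's remaining
# need is its maximum-claim row minus its currently-allocated row.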
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, ):
'''simple docstring'''
lowerCamelCase__ : str = claim_vector
lowerCamelCase__ : List[Any] = allocated_resources_table
lowerCamelCase__ : Dict = maximum_claim_table
def a__ (self ):
'''simple docstring'''
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def a__ (self ):
'''simple docstring'''
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def a__ (self ):
'''simple docstring'''
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(lowerCamelCase_ ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def a__ (self ):
'''simple docstring'''
return {self.__need().index(lowerCamelCase_ ): i for i in self.__need()}
def a__ (self, **lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = self.__need()
lowerCamelCase__ : List[Any] = self.__allocated_resources_table
lowerCamelCase__ : Optional[Any] = self.__available_resources()
lowerCamelCase__ : Optional[int] = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print('_' * 5_0 + '\n' )
while need_list:
lowerCamelCase__ : List[Any] = False
for each_need in need_list:
lowerCamelCase__ : Tuple = True
for index, need in enumerate(lowerCamelCase_ ):
if need > available_resources[index]:
lowerCamelCase__ : Optional[int] = False
break
if execution:
lowerCamelCase__ : Any = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
lowerCamelCase__ : Dict = original_need_index
print(f'''Process {process_number + 1} is executing.''' )
# remove the process run from stack
need_list.remove(lowerCamelCase_ )
# update available/freed resources stack
lowerCamelCase__ : List[str] = np.array(lowerCamelCase_ ) + np.array(
alloc_resources_table[process_number] )
print(
'Updated available resource stack for processes: '
+ ' '.join([str(lowerCamelCase_ ) for x in available_resources] ) )
break
if safe:
print('The process is in a safe state.\n' )
else:
print('System in unsafe state. Aborting...\n' )
break
    def __pretty_data(self):
        '''simple docstring'''
        print(' ' * 9 + 'Allocated Resource Table')
        for item in self.__allocated_resources_table:
            print(
                f'''P{self.__allocated_resources_table.index(item) + 1}'''
                + ' '.join(f'''{it:>8}''' for it in item)
                + '\n')
        print(' ' * 9 + 'System Resource Table')
        for item in self.__maximum_claim_table:
            print(
                f'''P{self.__maximum_claim_table.index(item) + 1}'''
                + ' '.join(f'''{it:>8}''' for it in item)
                + '\n')
        print(
            'Current Usage by Active Processes: '
            + ' '.join(str(x) for x in self.__claim_vector))
        print(
            'Initial Available Resources: '
            + ' '.join(str(x) for x in self.__available_resources()))
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
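
# Illustrative usage sketch (added; not part of the original module): run the
# banker's safety check on the module-level test data defined above.
if __name__ == "__main__":
    BankersAlgorithm(
        test_claim_vector, test_allocated_res_table, test_maximum_claim_table
    ).main(describe=True)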
"""simple docstring"""
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    # tanh via its exponential identity: tanh(x) = 2 / (1 + e^(-2x)) - 1
    return (2 / (1 + np.exp(-2 * vector))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
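
# Sanity-check sketch (added): the exponential identity must agree with
# numpy's built-in tanh.
if __name__ == "__main__":
    sample = np.array([-1.0, 0.0, 1.0, 5.0])
    assert np.allclose(tangent_hyperbolic(sample), np.tanh(sample))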
"""simple docstring"""
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        '''simple docstring'''
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, """tf_padding"""))
        self.parent.assertTrue(hasattr(config, """depth_multiplier"""))
class MobileNetVaModelTester:
    def __init__(self, parent, batch_size=13, num_channels=3, image_size=32, depth_multiplier=0.25, min_depth=8, tf_padding=True, last_hidden_size=1_024, output_stride=32, hidden_act="relu6", classifier_dropout_prob=0.1, initializer_range=0.02, use_labels=True, is_training=True, num_labels=10, scope=None, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        '''simple docstring'''
        return MobileNetVaConfig(
            num_channels=self.num_channels, image_size=self.image_size, depth_multiplier=self.depth_multiplier, min_depth=self.min_depth, tf_padding=self.tf_padding, hidden_act=self.hidden_act, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        '''simple docstring'''
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': MobileNetVaModel, 'image-classification': MobileNetVaForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        '''simple docstring'''
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)
    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    @unittest.skip(reason="""MobileNetV1 does not use inputs_embeds""")
    def test_inputs_embeds(self):
        '''simple docstring'''
        pass

    @unittest.skip(reason="""MobileNetV1 does not support input and output embeddings""")
    def test_model_common_attributes(self):
        '''simple docstring'''
        pass

    @unittest.skip(reason="""MobileNetV1 does not output attentions""")
    def test_attention_outputs(self):
        '''simple docstring'''
        pass

    def test_forward_signature(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        '''simple docstring'''
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 26
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """simple docstring"""
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
    return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        '''simple docstring'''
        return (
            MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v1_1.0_224""") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        '''simple docstring'''
        model = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v1_1.0_224""").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="""pt""").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1_001))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
"""simple docstring"""
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """simple docstring"""
    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)
    return count_of_possible_combinations(target)

def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """simple docstring"""
    def count_of_possible_combinations_with_dp_array(
        target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array)
        dp_array[target] = answer
        return answer
    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)

def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """simple docstring"""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
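
# Consistency sketch (added): the recursive, memoised and bottom-up variants
# should agree; for array=[1, 2, 5] and target=5 each counts 9 ordered
# combinations.
if __name__ == "__main__":
    assert (
        combination_sum_iv(n, array, target)
        == combination_sum_iv_dp_array(n, array, target)
        == combination_sum_iv_bottom_up(n, array, target)
        == 9
    )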
"""simple docstring"""
import qiskit
def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts:
    """simple docstring"""
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("""aer_simulator""")
    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)
    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)
    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))
    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.
    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(F'Total count for various states are: {quantum_entanglement(3)}')
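
# Expected behaviour (added note): the circuit prepares a GHZ state, so the
# measured counts concentrate on the all-zero and all-one bitstrings, e.g.
# roughly {'000': 500, '111': 500} over 1000 shots for quantum_entanglement(3).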
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS: list[int] = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS: set[int] = {ord(char) for char in VALID_CHARS}
COMMON_WORDS: list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    """simple docstring"""
    decoded = ""
    keychar: int
    cipherchar: int
    decodedchar: int

    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded

def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    """simple docstring"""
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles

def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    """simple docstring"""
    return [possible for possible in possibles if common_word in possible.lower()]

def solution(filename: str = "p059_cipher.txt") -> int:
    """simple docstring"""
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str

    data = Path(__file__).parent.joinpath(filename).read_text(encoding="""utf-8""")
    ciphertext = [int(number) for number in data.strip().split(""",""")]
    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
print(F'{solution() = }')
"""simple docstring"""
def least_divisible_repunit(divisor: int) -> int:
    """simple docstring"""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index

def solution(limit: int = 1000000) -> int:
    """simple docstring"""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(F'{solution() = }')
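
# Worked check (added): A(7) = 6, because 111111 = 7 * 15873 is the first
# repunit divisible by 7.
if __name__ == "__main__":
    assert least_divisible_repunit(7) == 6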
"""simple docstring"""
def solution(limit: int = 1000000) -> int:
    """simple docstring"""
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x,y,z are positive integers
                    frequency[n] += 1  # so z>0 and a>d ,also 4d<a
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
if __name__ == "__main__":
print(F'{solution() = }')
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_mluke'''] = ['''MLukeTokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring"""
from functools import reduce
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def solution(n: str = N) -> int:
    """simple docstring"""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12))
if __name__ == "__main__":
print(F'{solution() = }')
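
# Reference result (added note): with the 1000-digit constant N above and a
# window of 13 adjacent digits, solution() is expected to return 23514624000
# (the well-known Project Euler 8 answer).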
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''facebook/vit-mae-base''': '''https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json''',
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig(PretrainedConfig):
    model_type = 'vit_mae'

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, decoder_num_attention_heads=16, decoder_hidden_size=512, decoder_num_hidden_layers=8, decoder_intermediate_size=2_048, mask_ratio=0.75, norm_pix_loss=False, **kwargs, ):
'''simple docstring'''
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
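
# Minimal usage sketch (added): build the config with defaults and override a
# single field; the attribute names follow the __init__ above.
if __name__ == "__main__":
    config = ViTMAEConfig(mask_ratio=0.6)
    assert config.hidden_size == 768 and config.mask_ratio == 0.6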
"""simple docstring"""
def solution(n: int = 600851475143) -> int:
    """simple docstring"""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("""Parameter n must be int or castable to int.""")
    if n <= 0:
        raise ValueError("""Parameter n must be greater than or equal to one.""")
    ans = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    if n > 1:
        ans = n
    return int(ans)
if __name__ == "__main__":
print(F'{solution() = }')
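
# Worked check (added): 13195 = 5 * 7 * 13 * 29, so its largest prime factor
# is 29.
if __name__ == "__main__":
    assert solution(13195) == 29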
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '''▁'''

VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''facebook/mbart-large-50-one-to-many-mmt''': (
            '''https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model'''
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''facebook/mbart-large-50-one-to-many-mmt''': 1_0_2_4,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''', '''af_ZA''', '''az_AZ''', '''bn_IN''', '''fa_IR''', '''he_IL''', '''hr_HR''', '''id_ID''', '''ka_GE''', '''km_KH''', '''mk_MK''', '''ml_IN''', '''mn_MN''', '''mr_IN''', '''pl_PL''', '''ps_AF''', '''pt_XX''', '''sv_SE''', '''sw_KE''', '''ta_IN''', '''te_IN''', '''th_TH''', '''tl_XX''', '''uk_UA''', '''ur_PK''', '''xh_ZA''', '''gl_ES''', '''sl_SI''']
class MBartaaTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(self, vocab_file, src_lang=None, tgt_lang=None, eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ):
        '''simple docstring'''
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        kwargs["""additional_special_tokens"""] = kwargs.get("""additional_special_tokens""", [])
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            src_lang=src_lang, tgt_lang=tgt_lang, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |    2   |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["""<mask>"""] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._src_lang = src_lang if src_lang is not None else """en_XX"""
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def vocab_size(self) -> int:
        '''simple docstring'''
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        '''simple docstring'''
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        '''simple docstring'''
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self) -> Dict:
        '''simple docstring'''
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, """sp_model_kwargs"""):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def get_vocab(self) -> Dict:
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        '''simple docstring'''
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        '''simple docstring'''
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        '''simple docstring'''
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        '''simple docstring'''
        current_sub_tokens = []
        out_string = """"""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, """wb""") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        '''simple docstring'''
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        '''simple docstring'''
        if src_lang is None or tgt_lang is None:
            raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["""forced_bos_token_id"""] = tgt_lang_id
        return inputs

    def prepare_seqaseq_batch(self, src_texts: List[str], src_lang: str = "en_XX", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro_RO", **kwargs, ) -> BatchEncoding:
        '''simple docstring'''
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seqaseq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        '''simple docstring'''
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        '''simple docstring'''
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        '''simple docstring'''
        self.cur_lang_code_id = self.lang_code_to_id[src_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        '''simple docstring'''
        self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
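
# Usage sketch (added; downloading the checkpoint requires network access):
#
#   tokenizer = MBartaaTokenizer.from_pretrained(
#       "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX")
#   ids = tokenizer("Hello world")["input_ids"]
#   # ids begins with the en_XX language-code id and ends with eos_token_id,
#   # as arranged by set_src_lang_special_tokens above.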
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, )
    freeze_encoder: bool = field(default=False, metadata={'help': 'Whether to freeze the encoder.'})
    freeze_embeds: bool = field(default=False, metadata={'help': 'Whether to freeze the embeddings.'})
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'})
    task: Optional[str] = field(
        default='summarization', metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'}, )
    max_source_length: Optional[int] = field(
        default=1_024, metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        }, )
    max_target_length: Optional[int] = field(
        default=128, metadata={
            'help': (
                'The maximum total sequence length for target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        }, )
    val_max_target_length: Optional[int] = field(
        default=142, metadata={
            'help': (
                'The maximum total sequence length for validation target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded. '
                'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
                'during ``evaluate`` and ``predict``.'
            )
        }, )
    test_max_target_length: Optional[int] = field(
        default=142, metadata={
            'help': (
                'The maximum total sequence length for test target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        }, )
    n_train: Optional[int] = field(default=-1, metadata={'help': '# training examples. -1 means use all.'})
    n_val: Optional[int] = field(default=-1, metadata={'help': '# validation examples. -1 means use all.'})
    n_test: Optional[int] = field(default=-1, metadata={'help': '# test examples. -1 means use all.'})
    src_lang: Optional[str] = field(default=None, metadata={'help': 'Source language id for translation.'})
    tgt_lang: Optional[str] = field(default=None, metadata={'help': 'Target language id for translation.'})
    eval_beams: Optional[int] = field(default=None, metadata={'help': '# num_beams to use for evaluation.'})
    ignore_pad_token_for_loss: bool = field(
        default=True, metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'}, )
def handle_metrics(split, metrics, output_dir):
    """simple docstring"""
    logger.info(F"""***** {split} metrics *****""")
    for key in sorted(metrics.keys()):
        logger.info(F"""  {key} = {metrics[key]}""")
    save_json(metrics, os.path.join(output_dir, F"""{split}_results.json"""))

def main():
    """simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(""".json"""):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
check_output_dir(snake_case_ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("""Training/evaluation parameters %s""" , snake_case_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    extra_model_params = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), F"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
            setattr(config, p, getattr(training_args, p))
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    model = AutoModelForSeqaSeqLM.from_pretrained(
        model_args.model_name_or_path, from_tf=""".ckpt""" in model_args.model_name_or_path, config=config, cache_dir=model_args.cache_dir, )
    # use task specific params
    use_task_specific_params(model, data_args.task)
    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams
    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)
    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())
    dataset_class = SeqaSeqDataset
    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer, type_path="""train""", data_dir=data_args.data_dir, n_obs=data_args.n_train, max_target_length=data_args.max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or """""", )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer, type_path="""val""", data_dir=data_args.data_dir, n_obs=data_args.n_val, max_target_length=data_args.val_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or """""", )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer, type_path="""test""", data_dir=data_args.data_dir, n_obs=data_args.n_test, max_target_length=data_args.test_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or """""", )
        if training_args.do_predict
        else None
    )
    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = SeqaSeqTrainer(
        model=model, args=training_args, data_args=data_args, train_dataset=train_dataset, eval_dataset=eval_dataset, data_collator=SeqaSeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores), compute_metrics=compute_metrics_fn, tokenizer=tokenizer, )
    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("""*** Train ***""")
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
        metrics = train_result.metrics
        metrics["""train_n_objs"""] = data_args.n_train
        trainer.save_model()  # this also saves the tokenizer
        if trainer.is_world_process_zero():
            handle_metrics("""train""", metrics, training_args.output_dir)
            all_metrics.update(metrics)
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, """trainer_state.json"""))
            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)
    # Evaluation
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""")
        metrics = trainer.evaluate(metric_key_prefix="""val""")
        metrics["""val_n_objs"""] = data_args.n_val
        metrics["""val_loss"""] = round(metrics["""val_loss"""], 4)
        if trainer.is_world_process_zero():
            handle_metrics("""val""", metrics, training_args.output_dir)
            all_metrics.update(metrics)
    if training_args.do_predict:
        logger.info("""*** Predict ***""")
        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="""test""")
        metrics = test_output.metrics
        metrics["""test_n_objs"""] = data_args.n_test
        if trainer.is_world_process_zero():
            metrics["""test_loss"""] = round(metrics["""test_loss"""], 4)
            handle_metrics("""test""", metrics, training_args.output_dir)
            all_metrics.update(metrics)
            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True)
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, """test_generations.txt"""))
    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, """all_results.json"""))
    return all_metrics

def _mp_fn(index):
    """simple docstring"""
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
"""simple docstring"""
def apply_table(inp: str, table: list[int]) -> str:
    """simple docstring"""
    res = """"""
    for i in table:
        res += inp[i - 1]
    return res

def left_shift(data: str) -> str:
    """simple docstring"""
    return data[1:] + data[0]

def xor(a: str, b: str) -> str:
    """simple docstring"""
    res = """"""
    for i in range(len(b)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res

def apply_sbox(s: list[list[int]], data: str) -> str:
    """simple docstring"""
    row = int("""0b""" + data[0] + data[-1], 2)
    col = int("""0b""" + data[1:3], 2)
    return bin(s[row][col])[2:]

def function(expansion: list[int], s0: list[list[int]], s1: list[list[int]], key: str, message: str) -> str:
    """simple docstring"""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = """0""" * (2 - len(l)) + l  # noqa: E741
    r = """0""" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right
if __name__ == "__main__":
    key = input('''Enter 10 bit key: ''')
    message = input('''Enter 8 bit message: ''')

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print('''Cipher text is:''', CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print('''Plain text after decrypting is:''', PT)
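
    # Round-trip property (added note): decryption applies the same round
    # function with key1 and key2 in the opposite order, so the printed plain
    # text should equal the original 8 bit message.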
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_focalnet'''] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring"""
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """simple docstring"""
    if num <= 0:
        raise ValueError("""Input must be a positive integer""")
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input('''Enter a positive integer: ''').strip())
print(prime_sieve_eratosthenes(user_num))
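
# Worked check (added): the sieve keeps exactly the primes.
if __name__ == "__main__":
    assert prime_sieve_eratosthenes(10) == [2, 3, 5, 7]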
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, ):
        '''simple docstring'''
        size = size if size is not None else {"""shortest_edge""": 20}
        crop_size = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        '''simple docstring'''
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class MobileNetVaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileNetVaImageProcessor if is_vision_available() else None

    def setUp(self):
        '''simple docstring'''
        self.image_processor_tester = MobileNetVaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, """do_resize"""))
        self.assertTrue(hasattr(image_processing, """size"""))
        self.assertTrue(hasattr(image_processing, """do_center_crop"""))
        self.assertTrue(hasattr(image_processing, """crop_size"""))
    def test_image_processor_from_dict_with_kwargs(self):
        '''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"""shortest_edge""": 20})
        self.assertEqual(image_processor.crop_size, {"""height""": 18, """width""": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"""shortest_edge""": 42})
        self.assertEqual(image_processor.crop_size, {"""height""": 84, """width""": 84})

    def test_batch_feature(self):
        '''simple docstring'''
        pass
    def test_call_pil(self):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ), )
    def test_call_numpy(self):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ), )
    def test_call_pytorch(self):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ), )
"""simple docstring"""
import torch
from diffusers import StableDiffusionPipeline
model_id = '''path-to-your-trained-model'''
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to('''cuda''')
prompt = '''A photo of sks dog in a bucket'''
image = pipe(prompt, num_inference_steps=5_0, guidance_scale=7.5).images[0]
image.save('''dog-bucket.png''')
"""simple docstring"""
def cocktail_shaker_sort(unsorted: list) -> list:
    """simple docstring"""
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
print(F'{cocktail_shaker_sort(unsorted) = }')
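
# Worked check (added): the shaker sort handles duplicates and sorts in place.
if __name__ == "__main__":
    assert cocktail_shaker_sort([4, 5, 2, 1, 2]) == [1, 2, 2, 4, 5]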
"""simple docstring"""
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
@require_torch
    def test_offline_mode(self):
        '''simple docstring'''
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""
_lowerCAmelCase = """
mname = \"hf-internal-testing/tiny-random-bert\"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task=\"fill-mask\", model=mname)
print(\"success\")
"""
_lowerCAmelCase = """
import socket
def offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")
socket.socket = offline_socket
"""
        # Force fetching the files so that we can use the cache
        mname = """hf-internal-testing/tiny-random-bert"""
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="""fill-mask""", model=mname)
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, """-c""", """\n""".join([load, run, mock])]
        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["""TRANSFORMERS_OFFLINE"""] = """1"""
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("""success""", result.stdout.decode())
@require_torch
    def test_offline_mode_no_internet(self):
        '''simple docstring'''
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""
_lowerCAmelCase = """
mname = \"hf-internal-testing/tiny-random-bert\"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task=\"fill-mask\", model=mname)
print(\"success\")
"""
_lowerCAmelCase = """
import socket
def offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")
socket.socket = offline_socket
"""
        # Force fetching the files so that we can use the cache
        mname = """hf-internal-testing/tiny-random-bert"""
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="""fill-mask""", model=mname)
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, """-c""", """\n""".join([load, run, mock])]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("""success""", result.stdout.decode())
@require_torch
    def test_offline_mode_sharded_checkpoint(self):
        '''simple docstring'''
        load = """
from transformers import BertConfig, BertModel, BertTokenizer
"""
_lowerCAmelCase = """
mname = \"hf-internal-testing/tiny-random-bert-sharded\"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print(\"success\")
"""
_lowerCAmelCase = """
import socket
def offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")
socket.socket = offline_socket
"""
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, """-c""", """\n""".join([load, run])]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("""success""", result.stdout.decode())
        # next emulate no network
        cmd = [sys.executable, """-c""", """\n""".join([load, mock, run])]
        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["""TRANSFORMERS_OFFLINE"""] = """1"""
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("""success""", result.stdout.decode())
@require_torch
    def test_offline_mode_pipeline_exception(self):
        '''simple docstring'''
        load = """
from transformers import pipeline
"""
_lowerCAmelCase = """
mname = \"hf-internal-testing/tiny-random-bert\"
pipe = pipeline(model=mname)
"""
_lowerCAmelCase = """
import socket
def offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")
socket.socket = offline_socket
"""
        env = self.get_env()
        env["""TRANSFORMERS_OFFLINE"""] = """1"""
        cmd = [sys.executable, """-c""", """\n""".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            """You cannot infer task automatically within `pipeline` when using offline mode""", result.stderr.decode().replace("""\n""", """"""), )
@require_torch
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = """
from transformers import AutoModel
"""
_lowerCAmelCase = """
mname = \"hf-internal-testing/test_dynamic_model\"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print(\"success\")
"""
# baseline - just load from_pretrained with normal network
_lowerCAmelCase = [sys.executable, """-c""", """\n""".join([load, run] )]
# should succeed
_lowerCAmelCase = self.get_env()
_lowerCAmelCase = subprocess.run(lowerCamelCase , env=lowerCamelCase , check=lowerCamelCase , capture_output=lowerCamelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_lowerCAmelCase = """1"""
_lowerCAmelCase = subprocess.run(lowerCamelCase , env=lowerCamelCase , check=lowerCamelCase , capture_output=lowerCamelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
| 317
|
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_py3nvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_py3nvml_available():
    import py3nvml.py3nvml as nvml
SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
def __UpperCAmelCase ( snake_case_ : bool , snake_case_ : bool ) -> Tuple:
"""simple docstring"""
def run_func(snake_case_ : Union[str, Any] ):
@wraps(snake_case_ )
def run_in_eager_mode(*snake_case_ : Optional[int] , **snake_case_ : Union[str, Any] ):
return func(*snake_case_ , **snake_case_ )
@wraps(snake_case_ )
    @tf.function(experimental_compile=use_xla )
def run_in_graph_mode(*snake_case_ : Dict , **snake_case_ : Union[str, Any] ):
return func(*snake_case_ , **snake_case_ )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
"""Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
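# Hedged sketch of the eager-vs-graph dispatch implemented above, assuming the
# placeholder `snake_case_` arguments resolve to `do_eager_mode` / `use_xla` as
# the call sites (run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla))
# suggest. `jit_compile` is the current spelling of `experimental_compile`.
if is_tf_available():
    def run_with_tf_optimizations_sketch(do_eager_mode: bool, use_xla: bool):
        def wrapper(func):
            if do_eager_mode:
                return func  # eager: leave the plain Python function untouched
            return tf.function(func, jit_compile=use_xla)  # compile to a graph, via XLA if requested
        return wrapper
    @run_with_tf_optimizations_sketch(do_eager_mode=False, use_xla=False)
    def _double_sketch(x):
        return x * 2
    # _double_sketch(tf.constant(3)) -> tf.Tensor(6, shape=(), dtype=int32)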
def __UpperCAmelCase ( snake_case_ : int , snake_case_ : int , snake_case_ : int ) -> ["tf.Tensor"]:
"""simple docstring"""
_lowerCAmelCase = random.Random()
_lowerCAmelCase = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
    return tf.constant(snake_case_ , shape=(batch_size, sequence_length) , dtype=tf.int32 )
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = "TensorFlow"
@property
def A__ (self ):
'''simple docstring'''
return tf.__version__
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_lowerCAmelCase = self._prepare_inference_func(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return self._measure_speed(_inference )
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_lowerCAmelCase = self._prepare_train_func(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return self._measure_speed(_train )
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowerCamelCase )
_lowerCAmelCase = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_lowerCAmelCase = self._prepare_inference_func(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return self._measure_memory(_inference )
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowerCamelCase )
_lowerCAmelCase = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_lowerCAmelCase = self._prepare_train_func(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return self._measure_memory(_train )
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.config_dict[model_name]
        if self.args.fp16:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
_lowerCAmelCase = (
hasattr(lowerCamelCase , """architectures""" )
and isinstance(config.architectures , lowerCamelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_lowerCAmelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
_lowerCAmelCase = __import__("""transformers""" , fromlist=[model_class] )
_lowerCAmelCase = getattr(lowerCamelCase , lowerCamelCase )
_lowerCAmelCase = model_cls(lowerCamelCase )
except ImportError:
raise ImportError(
f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
_lowerCAmelCase = TF_MODEL_MAPPING[config.__class__](lowerCamelCase )
# encoder-decoder has vocab size saved differently
_lowerCAmelCase = config.vocab_size if hasattr(lowerCamelCase , """vocab_size""" ) else config.encoder.vocab_size
_lowerCAmelCase = random_input_ids(lowerCamelCase , lowerCamelCase , lowerCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(lowerCamelCase , decoder_input_ids=lowerCamelCase , training=lowerCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(lowerCamelCase , training=lowerCamelCase )
_lowerCAmelCase = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" )
        if self.args.fp16:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
_lowerCAmelCase = (
hasattr(lowerCamelCase , """architectures""" )
and isinstance(config.architectures , lowerCamelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_lowerCAmelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
_lowerCAmelCase = __import__("""transformers""" , fromlist=[model_class] )
_lowerCAmelCase = getattr(lowerCamelCase , lowerCamelCase )
_lowerCAmelCase = model_cls(lowerCamelCase )
except ImportError:
raise ImportError(
f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
_lowerCAmelCase = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](lowerCamelCase )
# encoder-decoder has vocab size saved differently
_lowerCAmelCase = config.vocab_size if hasattr(lowerCamelCase , """vocab_size""" ) else config.encoder.vocab_size
_lowerCAmelCase = random_input_ids(lowerCamelCase , lowerCamelCase , lowerCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
_lowerCAmelCase = model(lowerCamelCase , decoder_input_ids=lowerCamelCase , labels=lowerCamelCase , training=lowerCamelCase )[0]
_lowerCAmelCase = tf.gradients(lowerCamelCase , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
_lowerCAmelCase = model(lowerCamelCase , labels=lowerCamelCase , training=lowerCamelCase )[0]
_lowerCAmelCase = tf.gradients(lowerCamelCase , model.trainable_variables )
return gradients
_lowerCAmelCase = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def A__ (self , lowerCamelCase ):
'''simple docstring'''
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
                        # run the model 5 extra times to stabilize compilation for TPU/XLA
logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" )
timeit.repeat(lowerCamelCase , repeat=1 , number=5 )
                # as noted in https://docs.python.org/3/library/timeit.html#timeit.Timer.repeat, the min should be taken rather than the average
_lowerCAmelCase = timeit.repeat(
lowerCamelCase , repeat=self.args.repeat , number=10 , )
return min(lowerCamelCase ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(f"""Doesn't fit on GPU. {e}""" )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
logger.info(
"""Note that TensorFlow allocates more memory than """
"""it might need to speed up computation. """
"""The memory reported here corresponds to the memory """
"""reported by `nvidia-smi`, which can vary depending """
"""on total available memory on the GPU that is used.""" )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"""`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"""
""" consumption line by line.""" )
_lowerCAmelCase = start_memory_tracing("""transformers""" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"""Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"""
""" with `args.memory=False`""" )
elif self.args.is_gpu:
# gpu
                        if not is_py3nvml_available():
logger.warning(
"""py3nvml not installed, we won't log GPU memory usage. """
"""Install py3nvml (pip install py3nvml) to log information about GPU.""" )
_lowerCAmelCase = """N/A"""
else:
logger.info(
"""Measuring total GPU usage on GPU device. Make sure to not have additional processes"""
""" running on the same GPU.""" )
# init nvml
nvml.nvmlInit()
func()
_lowerCAmelCase = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
_lowerCAmelCase = nvml.nvmlDeviceGetMemoryInfo(lowerCamelCase )
_lowerCAmelCase = meminfo.used
_lowerCAmelCase = Memory(lowerCamelCase )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"""When enabling line by line tracing, the max peak memory for CPU is inaccurate in"""
""" TensorFlow.""" )
_lowerCAmelCase = None
else:
_lowerCAmelCase = measure_peak_memory_cpu(lowerCamelCase )
_lowerCAmelCase = Memory(lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else memory_bytes
if self.args.trace_memory_line_by_line:
_lowerCAmelCase = stop_memory_tracing(lowerCamelCase )
if memory is None:
_lowerCAmelCase = summary.total
else:
_lowerCAmelCase = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(f"""Doesn't fit on GPU. {e}""" )
return "N/A", None
| 317
| 1
|
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True )
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = field(default='image-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
__UpperCamelCase = Features({'image': Image()} )
__UpperCamelCase = Features({'labels': ClassLabel} )
__UpperCamelCase = "image"
__UpperCamelCase = "labels"
def A__ (self , lowerCamelCase ):
'''simple docstring'''
if self.label_column not in features:
raise ValueError(f"""Column {self.label_column} is not present in features.""" )
if not isinstance(features[self.label_column] , lowerCamelCase ):
raise ValueError(f"""Column {self.label_column} is not a ClassLabel.""" )
_lowerCAmelCase = copy.deepcopy(self )
_lowerCAmelCase = self.label_schema.copy()
_lowerCAmelCase = features[self.label_column]
_lowerCAmelCase = label_schema
return task_template
@property
def A__ (self ):
'''simple docstring'''
return {
self.image_column: "image",
self.label_column: "labels",
}
| 317
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Any = {
'''transfo-xl-wt103''': '''https://huggingface.co/transfo-xl-wt103/resolve/main/config.json''',
}
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = 'transfo-xl'
__UpperCamelCase = ['mems']
__UpperCamelCase = {
'n_token': 'vocab_size',
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__(self , lowerCamelCase=267_735 , lowerCamelCase=[20_000, 40_000, 200_000] , lowerCamelCase=1_024 , lowerCamelCase=1_024 , lowerCamelCase=16 , lowerCamelCase=64 , lowerCamelCase=4_096 , lowerCamelCase=4 , lowerCamelCase=False , lowerCamelCase=18 , lowerCamelCase=1_600 , lowerCamelCase=1_000 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=0 , lowerCamelCase=-1 , lowerCamelCase=True , lowerCamelCase=0.1 , lowerCamelCase=0.0 , lowerCamelCase=True , lowerCamelCase="normal" , lowerCamelCase=0.01 , lowerCamelCase=0.01 , lowerCamelCase=0.02 , lowerCamelCase=1e-5 , lowerCamelCase=0 , **lowerCamelCase , ):
'''simple docstring'''
_lowerCAmelCase = vocab_size
_lowerCAmelCase = []
self.cutoffs.extend(lowerCamelCase )
if proj_share_all_but_first:
_lowerCAmelCase = [False] + [True] * len(self.cutoffs )
else:
_lowerCAmelCase = [False] + [False] * len(self.cutoffs )
_lowerCAmelCase = d_model
_lowerCAmelCase = d_embed
_lowerCAmelCase = d_head
_lowerCAmelCase = d_inner
_lowerCAmelCase = div_val
_lowerCAmelCase = pre_lnorm
_lowerCAmelCase = n_layer
_lowerCAmelCase = n_head
_lowerCAmelCase = mem_len
_lowerCAmelCase = same_length
_lowerCAmelCase = attn_type
_lowerCAmelCase = clamp_len
_lowerCAmelCase = sample_softmax
_lowerCAmelCase = adaptive
_lowerCAmelCase = dropout
_lowerCAmelCase = dropatt
_lowerCAmelCase = untie_r
_lowerCAmelCase = init
_lowerCAmelCase = init_range
_lowerCAmelCase = proj_init_std
_lowerCAmelCase = init_std
_lowerCAmelCase = layer_norm_epsilon
super().__init__(eos_token_id=lowerCamelCase , **lowerCamelCase )
@property
def A__ (self ):
'''simple docstring'''
logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
def A__ (self , lowerCamelCase ):
'''simple docstring'''
raise NotImplementedError(
f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 317
| 1
|
"""simple docstring"""
def __UpperCAmelCase ( snake_case_ : int = 3 , snake_case_ : int = 7 , snake_case_ : int = 1000000 ) -> int:
"""simple docstring"""
_lowerCAmelCase = 0
_lowerCAmelCase = 1
for current_denominator in range(1 , limit + 1 ):
_lowerCAmelCase = current_denominator * numerator // denominator
if current_denominator % denominator == 0:
current_numerator -= 1
if current_numerator * max_denominator > current_denominator * max_numerator:
_lowerCAmelCase = current_numerator
_lowerCAmelCase = current_denominator
return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_0_0_0_0_0_0))
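# Hedged cross-check with fractions.Fraction: brute-force the closest fraction
# below numerator/denominator for a small limit and compare numerators. For
# limit=8 the best left neighbour of 3/7 is 2/5, so the answer is 2.
from fractions import Fraction
def best_left_neighbor(num: int = 3, den: int = 7, limit: int = 8) -> int:
    target = Fraction(num, den)
    best = Fraction(0, 1)
    for q in range(1, limit + 1):
        p = num * q // den  # largest p with p/q <= target
        if Fraction(p, q) < target:
            best = max(best, Fraction(p, q))
    return best.numerator
if __name__ == "__main__":
    print(best_left_neighbor())  # 2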
| 317
|
"""simple docstring"""
import math
def __UpperCAmelCase ( snake_case_ : int ) -> list[int]:
"""simple docstring"""
_lowerCAmelCase = []
_lowerCAmelCase = 2
_lowerCAmelCase = int(math.sqrt(snake_case_ ) ) # Size of every segment
_lowerCAmelCase = [True] * (end + 1)
_lowerCAmelCase = []
while start <= end:
if temp[start] is True:
in_prime.append(snake_case_ )
for i in range(start * start , end + 1 , snake_case_ ):
_lowerCAmelCase = False
start += 1
prime += in_prime
_lowerCAmelCase = end + 1
_lowerCAmelCase = min(2 * end , snake_case_ )
while low <= n:
_lowerCAmelCase = [True] * (high - low + 1)
for each in in_prime:
_lowerCAmelCase = math.floor(low / each ) * each
if t < low:
t += each
for j in range(snake_case_ , high + 1 , snake_case_ ):
_lowerCAmelCase = False
for j in range(len(snake_case_ ) ):
if temp[j] is True:
prime.append(j + low )
_lowerCAmelCase = high + 1
_lowerCAmelCase = min(high + end , snake_case_ )
return prime
if __name__ == "__main__":
    print(sieve(1_0**6))
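# Hedged reference for comparison: a compact classic sieve of Eratosthenes.
# (This assumes the placeholder `snake_case_` tokens inside `sieve` resolve to
# the local variables its surrounding lines name, e.g. `start`.)
def simple_sieve(n: int) -> list[int]:
    flags = [True] * (n + 1)
    flags[0] = flags[1] = False
    for i in range(2, int(n**0.5) + 1):
        if flags[i]:
            flags[i * i :: i] = [False] * len(range(i * i, n + 1, i))
    return [i for i, ok in enumerate(flags) if ok]
if __name__ == "__main__":
    print(simple_sieve(30))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]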
| 317
| 1
|
"""simple docstring"""
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
SCREAMING_SNAKE_CASE : Any = '''\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",
author = "Lin, Chin-Yew and
Och, Franz Josef",
booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",
month = "aug 23{--}aug 27",
year = "2004",
address = "Geneva, Switzerland",
publisher = "COLING",
url = "https://www.aclweb.org/anthology/C04-1072",
pages = "501--507",
}
'''
SCREAMING_SNAKE_CASE : List[Any] = '''\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,
the better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness
are not taken into account.
BLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
'''
SCREAMING_SNAKE_CASE : str = '''
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
\'bleu\': bleu score,
\'precisions\': geometric mean of n-gram precisions,
\'brevity_penalty\': brevity penalty,
\'length_ratio\': ratio of lengths,
\'translation_length\': translation_length,
\'reference_length\': reference_length
Examples:
>>> predictions = [
... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample
... ["foo", "bar", "foobar"] # tokenized prediction of the second sample
... ]
>>> references = [
... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)
... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric("bleu")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results["bleu"])
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
def A__ (self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/BLEU""",
"""https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
] , )
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase=4 , lowerCamelCase=False ):
'''simple docstring'''
_lowerCAmelCase = compute_bleu(
reference_corpus=lowerCamelCase , translation_corpus=lowerCamelCase , max_order=lowerCamelCase , smooth=lowerCamelCase )
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
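# Hedged direct-call sketch: compute_bleu (imported at the top of this file) can
# be used without the datasets.Metric wrapper; the keyword names mirror the
# _compute call above. A 4-token exact match scores 1.0 even at max_order=4.
if __name__ == "__main__":
    reference_corpus = [[["the", "cat", "sat", "down"]]]  # one sample, one reference
    translation_corpus = [["the", "cat", "sat", "down"]]  # a perfect hypothesis
    bleu, precisions, bp, ratio, translation_length, reference_length = compute_bleu(
        reference_corpus=reference_corpus,
        translation_corpus=translation_corpus,
        max_order=4,
        smooth=False,
    )
    print(bleu)  # 1.0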
| 317
|
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2 as cva  # OpenCV; aliased so the existing cva.* call sites below keep working
import numpy as np
# Parameters
SCREAMING_SNAKE_CASE : Any = (7_2_0, 1_2_8_0) # Height, Width
SCREAMING_SNAKE_CASE : List[str] = (0.4, 0.6) # if height or width lower than this scale, drop it.
SCREAMING_SNAKE_CASE : List[Any] = 1 / 1_0_0
SCREAMING_SNAKE_CASE : Optional[Any] = ''''''
SCREAMING_SNAKE_CASE : Dict = ''''''
SCREAMING_SNAKE_CASE : List[Any] = ''''''
SCREAMING_SNAKE_CASE : Dict = 2_5_0
def __UpperCAmelCase ( ) -> None:
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = get_dataset(snake_case_ , snake_case_ )
for index in range(snake_case_ ):
_lowerCAmelCase = random.sample(range(len(snake_case_ ) ) , 4 )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = update_image_and_anno(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , filter_scale=snake_case_ , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
_lowerCAmelCase = random_chars(32 )
_lowerCAmelCase = path.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
_lowerCAmelCase = F"""{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"""
cva.imwrite(F"""{file_root}.jpg""" , snake_case_ , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F"""Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}""" )
_lowerCAmelCase = []
for anno in new_annos:
_lowerCAmelCase = anno[3] - anno[1]
_lowerCAmelCase = anno[4] - anno[2]
_lowerCAmelCase = anno[1] + width / 2
_lowerCAmelCase = anno[2] + height / 2
_lowerCAmelCase = F"""{anno[0]} {x_center} {y_center} {width} {height}"""
annos_list.append(snake_case_ )
with open(F"""{file_root}.txt""" , """w""" ) as outfile:
outfile.write("""\n""".join(line for line in annos_list ) )
def __UpperCAmelCase ( snake_case_ : str , snake_case_ : str ) -> tuple[list, list]:
"""simple docstring"""
_lowerCAmelCase = []
_lowerCAmelCase = []
for label_file in glob.glob(os.path.join(snake_case_ , """*.txt""" ) ):
_lowerCAmelCase = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
with open(snake_case_ ) as in_file:
_lowerCAmelCase = in_file.readlines()
_lowerCAmelCase = os.path.join(snake_case_ , F"""{label_name}.jpg""" )
_lowerCAmelCase = []
for obj_list in obj_lists:
_lowerCAmelCase = obj_list.rstrip("""\n""" ).split(""" """ )
_lowerCAmelCase = float(obj[1] ) - float(obj[3] ) / 2
_lowerCAmelCase = float(obj[2] ) - float(obj[4] ) / 2
_lowerCAmelCase = float(obj[1] ) + float(obj[3] ) / 2
_lowerCAmelCase = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(snake_case_ )
labels.append(snake_case_ )
return img_paths, labels
def __UpperCAmelCase ( snake_case_ : list , snake_case_ : list , snake_case_ : list[int] , snake_case_ : tuple[int, int] , snake_case_ : tuple[float, float] , snake_case_ : float = 0.0 , ) -> tuple[list, list, str]:
"""simple docstring"""
    _lowerCAmelCase = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uint8 )
_lowerCAmelCase = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
_lowerCAmelCase = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
_lowerCAmelCase = int(scale_x * output_size[1] )
_lowerCAmelCase = int(scale_y * output_size[0] )
_lowerCAmelCase = []
_lowerCAmelCase = []
for i, index in enumerate(snake_case_ ):
_lowerCAmelCase = all_img_list[index]
path_list.append(snake_case_ )
_lowerCAmelCase = all_annos[index]
_lowerCAmelCase = cva.imread(snake_case_ )
if i == 0: # top-left
_lowerCAmelCase = cva.resize(snake_case_ , (divid_point_x, divid_point_y) )
_lowerCAmelCase = img
for bbox in img_annos:
_lowerCAmelCase = bbox[1] * scale_x
_lowerCAmelCase = bbox[2] * scale_y
_lowerCAmelCase = bbox[3] * scale_x
_lowerCAmelCase = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
_lowerCAmelCase = cva.resize(snake_case_ , (output_size[1] - divid_point_x, divid_point_y) )
_lowerCAmelCase = img
for bbox in img_annos:
_lowerCAmelCase = scale_x + bbox[1] * (1 - scale_x)
_lowerCAmelCase = bbox[2] * scale_y
_lowerCAmelCase = scale_x + bbox[3] * (1 - scale_x)
_lowerCAmelCase = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
_lowerCAmelCase = cva.resize(snake_case_ , (divid_point_x, output_size[0] - divid_point_y) )
_lowerCAmelCase = img
for bbox in img_annos:
_lowerCAmelCase = bbox[1] * scale_x
_lowerCAmelCase = scale_y + bbox[2] * (1 - scale_y)
_lowerCAmelCase = bbox[3] * scale_x
_lowerCAmelCase = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
_lowerCAmelCase = cva.resize(
snake_case_ , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
_lowerCAmelCase = img
for bbox in img_annos:
_lowerCAmelCase = scale_x + bbox[1] * (1 - scale_x)
_lowerCAmelCase = scale_y + bbox[2] * (1 - scale_y)
_lowerCAmelCase = scale_x + bbox[3] * (1 - scale_x)
_lowerCAmelCase = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding box small than scale of filter
if filter_scale > 0:
_lowerCAmelCase = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def __UpperCAmelCase ( snake_case_ : int ) -> str:
"""simple docstring"""
    assert number_char > 1, "The number of characters should be greater than 1"
_lowerCAmelCase = ascii_lowercase + digits
return "".join(random.choice(snake_case_ ) for _ in range(snake_case_ ) )
if __name__ == "__main__":
main()
print('''DONE ✅''')
| 317
| 1
|
"""simple docstring"""
def __UpperCAmelCase ( snake_case_ : int = 1000000 ) -> int:
"""simple docstring"""
_lowerCAmelCase = set(range(3 , snake_case_ , 2 ) )
primes.add(2 )
for p in range(3 , snake_case_ , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , snake_case_ , snake_case_ ) ) )
_lowerCAmelCase = [float(snake_case_ ) for n in range(limit + 1 )]
for p in primes:
for n in range(snake_case_ , limit + 1 , snake_case_ ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(F'{solution() = }')
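# Hedged brute-force check with math.gcd: phi(n) counts 1 <= k < n coprime to n,
# and summing phi(2..limit) counts the reduced proper fractions (Project Euler 72).
from math import gcd
def phi_sum_bruteforce(limit: int) -> int:
    return sum(
        sum(1 for k in range(1, n) if gcd(n, k) == 1)  # phi(n)
        for n in range(2, limit + 1)
    )
if __name__ == "__main__":
    print(phi_sum_bruteforce(8))  # 21, which solution(8) should reproduce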
| 317
|
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
SCREAMING_SNAKE_CASE : Dict = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def __UpperCAmelCase ( snake_case_ : Optional[int] ) -> List[str]:
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(snake_case_ )
def __UpperCAmelCase ( snake_case_ : Union[str, Any] ) -> int:
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_terminal_summary_main
_lowerCAmelCase = terminalreporter.config.getoption("""--make-reports""" )
if make_reports:
pytest_terminal_summary_main(snake_case_ , id=snake_case_ )
| 317
| 1
|
"""simple docstring"""
from typing import List
import numpy as np
def __UpperCAmelCase ( snake_case_ : dict ) -> int:
"""simple docstring"""
_lowerCAmelCase = {key: len(snake_case_ ) for key, value in gen_kwargs.items() if isinstance(snake_case_ , snake_case_ )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
"""Sharding is ambiguous for this dataset: """
+ """we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"""
+ """\n""".join(F"""\t- key {key} has length {length}""" for key, length in lists_lengths.items() )
+ """\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, """
+ """and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."""
) )
_lowerCAmelCase = max(lists_lengths.values() , default=0 )
return max(1 , snake_case_ )
def __UpperCAmelCase ( snake_case_ : int , snake_case_ : int ) -> List[range]:
"""simple docstring"""
_lowerCAmelCase = []
for group_idx in range(snake_case_ ):
_lowerCAmelCase = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
_lowerCAmelCase = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
_lowerCAmelCase = range(snake_case_ , start + num_shards_to_add )
shards_indices_per_group.append(snake_case_ )
return shards_indices_per_group
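# Hedged usage sketch, assuming the function above is `_distribute_shards` as the
# caller below suggests: five shards over at most three jobs become contiguous
# ranges, with the remainder padding the first groups.
if __name__ == "__main__":
    print(_distribute_shards(num_shards=5, max_num_jobs=3))
    # expected: [range(0, 2), range(2, 4), range(4, 5)]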
def __UpperCAmelCase ( snake_case_ : dict , snake_case_ : int ) -> List[dict]:
"""simple docstring"""
_lowerCAmelCase = _number_of_shards_in_gen_kwargs(snake_case_ )
if num_shards == 1:
return [dict(snake_case_ )]
else:
_lowerCAmelCase = _distribute_shards(num_shards=snake_case_ , max_num_jobs=snake_case_ )
return [
{
key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
if isinstance(snake_case_ , snake_case_ )
else value
for key, value in gen_kwargs.items()
}
for group_idx in range(len(snake_case_ ) )
]
def __UpperCAmelCase ( snake_case_ : List[dict] ) -> dict:
"""simple docstring"""
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key] , snake_case_ )
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def __UpperCAmelCase ( snake_case_ : np.random.Generator , snake_case_ : dict ) -> dict:
"""simple docstring"""
_lowerCAmelCase = {len(snake_case_ ) for value in gen_kwargs.values() if isinstance(snake_case_ , snake_case_ )}
_lowerCAmelCase = {}
for size in list_sizes:
_lowerCAmelCase = list(range(snake_case_ ) )
rng.shuffle(indices_per_size[size] )
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
_lowerCAmelCase = dict(snake_case_ )
for key, value in shuffled_kwargs.items():
if isinstance(snake_case_ , snake_case_ ):
_lowerCAmelCase = [value[i] for i in indices_per_size[len(snake_case_ )]]
return shuffled_kwargs
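# Hedged behavioural sketch for the shuffle helper above (assumed to be named
# `_shuffle_gen_kwargs`): lists sharing a length get the same permutation, so
# parallel data-source lists stay aligned, while scalar values pass through.
if __name__ == "__main__":
    rng = np.random.default_rng(42)
    gen_kwargs = {"files": ["a.txt", "b.txt", "c.txt"], "sizes": [1, 2, 3], "split": "train"}
    # shuffled = _shuffle_gen_kwargs(rng, gen_kwargs)
    # shuffled["files"] and shuffled["sizes"] are reordered identically; "split" is unchanged.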
| 317
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
SCREAMING_SNAKE_CASE : Optional[Any] = {
'''Acehnese Arabic''': '''ace_Arab''',
'''Acehnese Latin''': '''ace_Latn''',
'''Mesopotamian Arabic''': '''acm_Arab''',
'''Ta\'izzi-Adeni Arabic''': '''acq_Arab''',
'''Tunisian Arabic''': '''aeb_Arab''',
'''Afrikaans''': '''afr_Latn''',
'''South Levantine Arabic''': '''ajp_Arab''',
'''Akan''': '''aka_Latn''',
'''Amharic''': '''amh_Ethi''',
'''North Levantine Arabic''': '''apc_Arab''',
'''Modern Standard Arabic''': '''arb_Arab''',
'''Modern Standard Arabic Romanized''': '''arb_Latn''',
'''Najdi Arabic''': '''ars_Arab''',
'''Moroccan Arabic''': '''ary_Arab''',
'''Egyptian Arabic''': '''arz_Arab''',
'''Assamese''': '''asm_Beng''',
'''Asturian''': '''ast_Latn''',
'''Awadhi''': '''awa_Deva''',
'''Central Aymara''': '''ayr_Latn''',
'''South Azerbaijani''': '''azb_Arab''',
'''North Azerbaijani''': '''azj_Latn''',
'''Bashkir''': '''bak_Cyrl''',
'''Bambara''': '''bam_Latn''',
'''Balinese''': '''ban_Latn''',
'''Belarusian''': '''bel_Cyrl''',
'''Bemba''': '''bem_Latn''',
'''Bengali''': '''ben_Beng''',
'''Bhojpuri''': '''bho_Deva''',
'''Banjar Arabic''': '''bjn_Arab''',
'''Banjar Latin''': '''bjn_Latn''',
'''Standard Tibetan''': '''bod_Tibt''',
'''Bosnian''': '''bos_Latn''',
'''Buginese''': '''bug_Latn''',
'''Bulgarian''': '''bul_Cyrl''',
'''Catalan''': '''cat_Latn''',
'''Cebuano''': '''ceb_Latn''',
'''Czech''': '''ces_Latn''',
'''Chokwe''': '''cjk_Latn''',
'''Central Kurdish''': '''ckb_Arab''',
'''Crimean Tatar''': '''crh_Latn''',
'''Welsh''': '''cym_Latn''',
'''Danish''': '''dan_Latn''',
'''German''': '''deu_Latn''',
'''Southwestern Dinka''': '''dik_Latn''',
'''Dyula''': '''dyu_Latn''',
'''Dzongkha''': '''dzo_Tibt''',
'''Greek''': '''ell_Grek''',
'''English''': '''eng_Latn''',
'''Esperanto''': '''epo_Latn''',
'''Estonian''': '''est_Latn''',
'''Basque''': '''eus_Latn''',
'''Ewe''': '''ewe_Latn''',
'''Faroese''': '''fao_Latn''',
'''Fijian''': '''fij_Latn''',
'''Finnish''': '''fin_Latn''',
'''Fon''': '''fon_Latn''',
'''French''': '''fra_Latn''',
'''Friulian''': '''fur_Latn''',
'''Nigerian Fulfulde''': '''fuv_Latn''',
'''Scottish Gaelic''': '''gla_Latn''',
'''Irish''': '''gle_Latn''',
'''Galician''': '''glg_Latn''',
'''Guarani''': '''grn_Latn''',
'''Gujarati''': '''guj_Gujr''',
'''Haitian Creole''': '''hat_Latn''',
'''Hausa''': '''hau_Latn''',
'''Hebrew''': '''heb_Hebr''',
'''Hindi''': '''hin_Deva''',
'''Chhattisgarhi''': '''hne_Deva''',
'''Croatian''': '''hrv_Latn''',
'''Hungarian''': '''hun_Latn''',
'''Armenian''': '''hye_Armn''',
'''Igbo''': '''ibo_Latn''',
'''Ilocano''': '''ilo_Latn''',
'''Indonesian''': '''ind_Latn''',
'''Icelandic''': '''isl_Latn''',
'''Italian''': '''ita_Latn''',
'''Javanese''': '''jav_Latn''',
'''Japanese''': '''jpn_Jpan''',
'''Kabyle''': '''kab_Latn''',
'''Jingpho''': '''kac_Latn''',
'''Kamba''': '''kam_Latn''',
'''Kannada''': '''kan_Knda''',
'''Kashmiri Arabic''': '''kas_Arab''',
'''Kashmiri Devanagari''': '''kas_Deva''',
'''Georgian''': '''kat_Geor''',
'''Central Kanuri Arabic''': '''knc_Arab''',
'''Central Kanuri Latin''': '''knc_Latn''',
'''Kazakh''': '''kaz_Cyrl''',
'''Kabiyè''': '''kbp_Latn''',
'''Kabuverdianu''': '''kea_Latn''',
'''Khmer''': '''khm_Khmr''',
'''Kikuyu''': '''kik_Latn''',
'''Kinyarwanda''': '''kin_Latn''',
'''Kyrgyz''': '''kir_Cyrl''',
'''Kimbundu''': '''kmb_Latn''',
'''Northern Kurdish''': '''kmr_Latn''',
'''Kikongo''': '''kon_Latn''',
'''Korean''': '''kor_Hang''',
'''Lao''': '''lao_Laoo''',
'''Ligurian''': '''lij_Latn''',
'''Limburgish''': '''lim_Latn''',
'''Lingala''': '''lin_Latn''',
'''Lithuanian''': '''lit_Latn''',
'''Lombard''': '''lmo_Latn''',
'''Latgalian''': '''ltg_Latn''',
'''Luxembourgish''': '''ltz_Latn''',
'''Luba-Kasai''': '''lua_Latn''',
'''Ganda''': '''lug_Latn''',
'''Luo''': '''luo_Latn''',
'''Mizo''': '''lus_Latn''',
'''Standard Latvian''': '''lvs_Latn''',
'''Magahi''': '''mag_Deva''',
'''Maithili''': '''mai_Deva''',
'''Malayalam''': '''mal_Mlym''',
'''Marathi''': '''mar_Deva''',
'''Minangkabau Arabic ''': '''min_Arab''',
'''Minangkabau Latin''': '''min_Latn''',
'''Macedonian''': '''mkd_Cyrl''',
'''Plateau Malagasy''': '''plt_Latn''',
'''Maltese''': '''mlt_Latn''',
'''Meitei Bengali''': '''mni_Beng''',
'''Halh Mongolian''': '''khk_Cyrl''',
'''Mossi''': '''mos_Latn''',
'''Maori''': '''mri_Latn''',
'''Burmese''': '''mya_Mymr''',
'''Dutch''': '''nld_Latn''',
'''Norwegian Nynorsk''': '''nno_Latn''',
'''Norwegian Bokmål''': '''nob_Latn''',
'''Nepali''': '''npi_Deva''',
'''Northern Sotho''': '''nso_Latn''',
'''Nuer''': '''nus_Latn''',
'''Nyanja''': '''nya_Latn''',
'''Occitan''': '''oci_Latn''',
'''West Central Oromo''': '''gaz_Latn''',
'''Odia''': '''ory_Orya''',
'''Pangasinan''': '''pag_Latn''',
'''Eastern Panjabi''': '''pan_Guru''',
'''Papiamento''': '''pap_Latn''',
'''Western Persian''': '''pes_Arab''',
'''Polish''': '''pol_Latn''',
'''Portuguese''': '''por_Latn''',
'''Dari''': '''prs_Arab''',
'''Southern Pashto''': '''pbt_Arab''',
'''Ayacucho Quechua''': '''quy_Latn''',
'''Romanian''': '''ron_Latn''',
'''Rundi''': '''run_Latn''',
'''Russian''': '''rus_Cyrl''',
'''Sango''': '''sag_Latn''',
'''Sanskrit''': '''san_Deva''',
'''Santali''': '''sat_Olck''',
'''Sicilian''': '''scn_Latn''',
'''Shan''': '''shn_Mymr''',
'''Sinhala''': '''sin_Sinh''',
'''Slovak''': '''slk_Latn''',
'''Slovenian''': '''slv_Latn''',
'''Samoan''': '''smo_Latn''',
'''Shona''': '''sna_Latn''',
'''Sindhi''': '''snd_Arab''',
'''Somali''': '''som_Latn''',
'''Southern Sotho''': '''sot_Latn''',
'''Spanish''': '''spa_Latn''',
'''Tosk Albanian''': '''als_Latn''',
'''Sardinian''': '''srd_Latn''',
'''Serbian''': '''srp_Cyrl''',
'''Swati''': '''ssw_Latn''',
'''Sundanese''': '''sun_Latn''',
'''Swedish''': '''swe_Latn''',
'''Swahili''': '''swh_Latn''',
'''Silesian''': '''szl_Latn''',
'''Tamil''': '''tam_Taml''',
'''Tatar''': '''tat_Cyrl''',
'''Telugu''': '''tel_Telu''',
'''Tajik''': '''tgk_Cyrl''',
'''Tagalog''': '''tgl_Latn''',
'''Thai''': '''tha_Thai''',
'''Tigrinya''': '''tir_Ethi''',
'''Tamasheq Latin''': '''taq_Latn''',
'''Tamasheq Tifinagh''': '''taq_Tfng''',
'''Tok Pisin''': '''tpi_Latn''',
'''Tswana''': '''tsn_Latn''',
'''Tsonga''': '''tso_Latn''',
'''Turkmen''': '''tuk_Latn''',
'''Tumbuka''': '''tum_Latn''',
'''Turkish''': '''tur_Latn''',
'''Twi''': '''twi_Latn''',
'''Central Atlas Tamazight''': '''tzm_Tfng''',
'''Uyghur''': '''uig_Arab''',
'''Ukrainian''': '''ukr_Cyrl''',
'''Umbundu''': '''umb_Latn''',
'''Urdu''': '''urd_Arab''',
'''Northern Uzbek''': '''uzn_Latn''',
'''Venetian''': '''vec_Latn''',
'''Vietnamese''': '''vie_Latn''',
'''Waray''': '''war_Latn''',
'''Wolof''': '''wol_Latn''',
'''Xhosa''': '''xho_Latn''',
'''Eastern Yiddish''': '''ydd_Hebr''',
'''Yoruba''': '''yor_Latn''',
'''Yue Chinese''': '''yue_Hant''',
'''Chinese Simplified''': '''zho_Hans''',
'''Chinese Traditional''': '''zho_Hant''',
'''Standard Malay''': '''zsm_Latn''',
'''Zulu''': '''zul_Latn''',
}
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = 'facebook/nllb-200-distilled-600M'
__UpperCamelCase = (
'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
    'which should be the desired output language. Both `src_lang` and `tgt_lang` are written in '
'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
)
__UpperCamelCase = 'translator'
__UpperCamelCase = AutoTokenizer
    __UpperCamelCase = AutoModelForSeq2SeqLM
__UpperCamelCase = LANGUAGE_CODES
__UpperCamelCase = ['text', 'text', 'text']
__UpperCamelCase = ['text']
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if src_lang not in self.lang_to_code:
raise ValueError(f"""{src_lang} is not a supported language.""" )
if tgt_lang not in self.lang_to_code:
raise ValueError(f"""{tgt_lang} is not a supported language.""" )
_lowerCAmelCase = self.lang_to_code[src_lang]
_lowerCAmelCase = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
lowerCamelCase , return_tensors="""pt""" , src_lang=lowerCamelCase , tgt_lang=lowerCamelCase )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
return self.model.generate(**lowerCamelCase )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=lowerCamelCase )
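# Hedged end-to-end sketch of what this tool wires together, using the public
# pipeline API instead of the tool interface; src_lang/tgt_lang take the same
# NLLB codes stored in LANGUAGE_CODES. Downloads the checkpoint on first run.
if __name__ == "__main__":
    from transformers import pipeline
    translator = pipeline(
        "translation",
        model="facebook/nllb-200-distilled-600M",
        src_lang=LANGUAGE_CODES["English"],  # "eng_Latn"
        tgt_lang=LANGUAGE_CODES["French"],   # "fra_Latn"
    )
    print(translator("Hello, how are you?")[0]["translation_text"])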
| 317
| 1
|
"""simple docstring"""
from ....utils import logging
SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
class __lowerCamelCase ( __lowercase ):
def __init__(self , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=2_048 ):
'''simple docstring'''
_lowerCAmelCase = config.__dict__
_lowerCAmelCase = modal_hidden_size
if num_labels:
_lowerCAmelCase = num_labels
| 317
|
"""simple docstring"""
from math import isqrt
def __UpperCAmelCase ( snake_case_ : int ) -> list[int]:
"""simple docstring"""
_lowerCAmelCase = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , snake_case_ , snake_case_ ):
_lowerCAmelCase = False
return [i for i in range(2 , snake_case_ ) if is_prime[i]]
def __UpperCAmelCase ( snake_case_ : int = 10**8 ) -> int:
"""simple docstring"""
_lowerCAmelCase = calculate_prime_numbers(max_number // 2 )
_lowerCAmelCase = 0
_lowerCAmelCase = 0
_lowerCAmelCase = len(snake_case_ ) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
if __name__ == "__main__":
print(F'{solution() = }')
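# Hedged brute-force cross-check for a small bound: count n < bound whose
# factorization contains exactly two primes, multiplicity included.
def semiprimes_bruteforce(bound: int) -> int:
    def prime_factor_count(n: int) -> int:
        count, d = 0, 2
        while d * d <= n:
            while n % d == 0:
                n //= d
                count += 1
            d += 1
        return count + (1 if n > 1 else 0)
    return sum(1 for n in range(4, bound) if prime_factor_count(n) == 2)
if __name__ == "__main__":
    print(semiprimes_bruteforce(30))  # 10: 4, 6, 9, 10, 14, 15, 21, 22, 25, 26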
| 317
| 1
|
"""simple docstring"""
import random
from .binary_exp_mod import bin_exp_mod
def __UpperCAmelCase ( snake_case_ : Optional[int] , snake_case_ : Optional[Any]=1000 ) -> Union[str, Any]:
"""simple docstring"""
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
_lowerCAmelCase = n - 1
_lowerCAmelCase = 0
while d % 2 == 0:
        d //= 2  # integer division keeps d an int for bin_exp_mod
exp += 1
# n - 1=d*(2**exp)
_lowerCAmelCase = 0
while count < prec:
_lowerCAmelCase = random.randint(2 , n - 1 )
_lowerCAmelCase = bin_exp_mod(snake_case_ , snake_case_ , snake_case_ )
if b != 1:
_lowerCAmelCase = True
for _ in range(snake_case_ ):
if b == n - 1:
_lowerCAmelCase = False
break
_lowerCAmelCase = b * b
b %= n
if flag:
return False
count += 1
return True
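# Hedged sanity reference: a plain trial-division primality test to compare with
# the probabilistic check above on small inputs.
def is_prime_trial_division(n: int) -> bool:
    if n < 2:
        return False
    return all(n % d for d in range(2, int(n**0.5) + 1))
# primes below 50: [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47]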
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : List[Any] = abs(int(input('''Enter bound : ''').strip()))
print('''Here\'s the list of primes:''')
print(''', '''.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 317
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = (
        'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image. '
        'It takes two arguments named `image` which should be the original image, and `label` which should be a text '
        'describing the elements that should be identified in the segmentation mask. The tool returns the mask.'
)
__UpperCamelCase = 'CIDAS/clipseg-rd64-refined'
__UpperCamelCase = 'image_segmenter'
__UpperCamelCase = CLIPSegForImageSegmentation
__UpperCamelCase = ['image', 'text']
__UpperCamelCase = ['image']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""vision"""] )
super().__init__(*lowerCamelCase , **lowerCamelCase )
def A__ (self , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
return self.pre_processor(text=[label] , images=[image] , padding=lowerCamelCase , return_tensors="""pt""" )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
with torch.no_grad():
_lowerCAmelCase = self.model(**lowerCamelCase ).logits
return logits
def A__ (self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = outputs.cpu().detach().numpy()
_lowerCAmelCase = 0
_lowerCAmelCase = 1
        return Image.fromarray((array * 255).astype(np.uint8 ) )
| 317
| 1
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __lowerCamelCase ( __lowercase , unittest.TestCase ):
__UpperCamelCase = DiTPipeline
__UpperCamelCase = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
__UpperCamelCase = PipelineTesterMixin.required_optional_params - {
'latents',
'num_images_per_prompt',
'callback',
'callback_steps',
}
__UpperCamelCase = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
__UpperCamelCase = False
def A__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
        _lowerCAmelCase = Transformer2DModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=lowerCamelCase , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=1_000 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=lowerCamelCase , )
_lowerCAmelCase = AutoencoderKL()
_lowerCAmelCase = DDIMScheduler()
_lowerCAmelCase = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler}
return components
def A__ (self , lowerCamelCase , lowerCamelCase=0 ):
'''simple docstring'''
if str(lowerCamelCase ).startswith("""mps""" ):
_lowerCAmelCase = torch.manual_seed(lowerCamelCase )
else:
_lowerCAmelCase = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
_lowerCAmelCase = {
"""class_labels""": [1],
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = """cpu"""
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = self.pipeline_class(**lowerCamelCase )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
_lowerCAmelCase = self.get_dummy_inputs(lowerCamelCase )
_lowerCAmelCase = pipe(**lowerCamelCase ).images
_lowerCAmelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
_lowerCAmelCase = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
_lowerCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCamelCase , 1e-3 )
def A__ (self ):
'''simple docstring'''
self._test_inference_batch_single_identical(relax_max_difference=lowerCamelCase , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def A__ (self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class __lowerCamelCase ( unittest.TestCase ):
def A__ (self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = torch.manual_seed(0 )
_lowerCAmelCase = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" )
pipe.to("""cuda""" )
_lowerCAmelCase = ["""vase""", """umbrella""", """white shark""", """white wolf"""]
_lowerCAmelCase = pipe.get_label_ids(lowerCamelCase )
_lowerCAmelCase = pipe(lowerCamelCase , generator=lowerCamelCase , num_inference_steps=40 , output_type="""np""" ).images
for word, image in zip(lowerCamelCase , lowerCamelCase ):
_lowerCAmelCase = load_numpy(
f"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" )
assert np.abs((expected_image - image).max() ) < 1e-2
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" )
_lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("""cuda""" )
_lowerCAmelCase = ["""vase""", """umbrella"""]
_lowerCAmelCase = pipe.get_label_ids(lowerCamelCase )
_lowerCAmelCase = torch.manual_seed(0 )
_lowerCAmelCase = pipe(lowerCamelCase , generator=lowerCamelCase , num_inference_steps=25 , output_type="""np""" ).images
for word, image in zip(lowerCamelCase , lowerCamelCase ):
_lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
f"""/dit/{word}_512.npy""" )
assert np.abs((expected_image - image).max() ) < 1e-1
| 317
|
"""simple docstring"""
from __future__ import annotations
import queue
class __lowerCamelCase :
def __init__(self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = data
_lowerCAmelCase = None
_lowerCAmelCase = None
def __UpperCAmelCase ( ) -> TreeNode:
"""simple docstring"""
print("""\n********Press N to stop entering at any point of time********\n""" )
_lowerCAmelCase = input("""Enter the value of the root node: """ ).strip().lower()
_lowerCAmelCase = queue.Queue()
_lowerCAmelCase = TreeNode(int(snake_case_ ) )
q.put(snake_case_ )
while not q.empty():
_lowerCAmelCase = q.get()
_lowerCAmelCase = F"""Enter the left node of {node_found.data}: """
_lowerCAmelCase = input(snake_case_ ).strip().lower() or """n"""
if check == "n":
return tree_node
_lowerCAmelCase = TreeNode(int(snake_case_ ) )
_lowerCAmelCase = left_node
q.put(snake_case_ )
_lowerCAmelCase = F"""Enter the right node of {node_found.data}: """
_lowerCAmelCase = input(snake_case_ ).strip().lower() or """n"""
if check == "n":
return tree_node
_lowerCAmelCase = TreeNode(int(snake_case_ ) )
_lowerCAmelCase = right_node
q.put(snake_case_ )
raise
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
print(node.data , end=""",""" )
pre_order(node.left )
pre_order(node.right )
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
in_order(node.left )
print(node.data , end=""",""" )
in_order(node.right )
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=""",""" )
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
_lowerCAmelCase = queue.Queue()
q.put(snake_case_ )
while not q.empty():
_lowerCAmelCase = q.get()
print(node_dequeued.data , end=""",""" )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
_lowerCAmelCase = queue.Queue()
q.put(snake_case_ )
while not q.empty():
_lowerCAmelCase = []
while not q.empty():
_lowerCAmelCase = q.get()
print(node_dequeued.data , end=""",""" )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(snake_case_ )
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
_lowerCAmelCase = []
_lowerCAmelCase = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=""",""" )
stack.append(snake_case_ )
_lowerCAmelCase = n.left
# end of while means current node doesn't have left child
_lowerCAmelCase = stack.pop()
# start to traverse its right child
_lowerCAmelCase = n.right
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
_lowerCAmelCase = []
_lowerCAmelCase = node
while n or stack:
while n:
stack.append(snake_case_ )
_lowerCAmelCase = n.left
_lowerCAmelCase = stack.pop()
print(n.data , end=""",""" )
_lowerCAmelCase = n.right
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
_lowerCAmelCase , _lowerCAmelCase = [], []
_lowerCAmelCase = node
stacka.append(snake_case_ )
while stacka: # to find the reversed order of post order, store it in stack2
_lowerCAmelCase = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(snake_case_ )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end=""",""" )
def __UpperCAmelCase ( snake_case_ : str = "" , snake_case_ : int=50 , snake_case_ : Dict="*" ) -> str:
"""simple docstring"""
if not s:
return "\n" + width * char
_lowerCAmelCase , _lowerCAmelCase = divmod(width - len(snake_case_ ) - 2 , 2 )
return F"""{left * char} {s} {(left + extra) * char}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('''Binary Tree Traversals'''))
SCREAMING_SNAKE_CASE : TreeNode = build_tree()
print(prompt('''Pre Order Traversal'''))
pre_order(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal'''))
in_order(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal'''))
post_order(node)
print(prompt() + '''\n''')
print(prompt('''Level Order Traversal'''))
level_order(node)
print(prompt() + '''\n''')
print(prompt('''Actual Level Order Traversal'''))
level_order_actual(node)
print('''*''' * 5_0 + '''\n''')
print(prompt('''Pre Order Traversal - Iteration Version'''))
pre_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal - Iteration Version'''))
in_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal - Iteration Version'''))
post_order_iter(node)
print(prompt())
| 317
| 1
|
"""simple docstring"""
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
SCREAMING_SNAKE_CASE : Tuple = '''.'''
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : List[Any] = os.path.join(REPO_PATH, '''utils/documentation_tests.txt''')
SCREAMING_SNAKE_CASE : int = []
SCREAMING_SNAKE_CASE : Tuple = []
with open(doctest_file_path) as fp:
for line in fp:
SCREAMING_SNAKE_CASE : Optional[int] = line.strip()
SCREAMING_SNAKE_CASE : Tuple = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
SCREAMING_SNAKE_CASE : Dict = '''\n'''.join(non_existent_paths)
raise ValueError(F'`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}')
if all_paths != sorted(all_paths):
raise ValueError('''Files in `utils/documentation_tests.txt` are not in alphabetical order.''')
| 317
|
"""simple docstring"""
from __future__ import annotations
class __lowerCamelCase :
def __init__(self , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase = text, pattern
_lowerCAmelCase , _lowerCAmelCase = len(lowerCamelCase ), len(lowerCamelCase )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def A__ (self , lowerCamelCase ):
'''simple docstring'''
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = []
for i in range(self.textLen - self.patLen + 1 ):
_lowerCAmelCase = self.mismatch_in_text(lowerCamelCase )
if mismatch_index == -1:
positions.append(lowerCamelCase )
else:
_lowerCAmelCase = self.match_in_pattern(self.text[mismatch_index] )
_lowerCAmelCase = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
SCREAMING_SNAKE_CASE : Any = '''ABAABA'''
SCREAMING_SNAKE_CASE : Optional[int] = '''AB'''
SCREAMING_SNAKE_CASE : str = BoyerMooreSearch(text, pattern)
SCREAMING_SNAKE_CASE : Tuple = bms.bad_character_heuristic()
if len(positions) == 0:
print('''No match found''')
else:
print('''Pattern found in following positions: ''')
print(positions)
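# Editor's note: an un-obfuscated sketch of what bad_character_heuristic above
# actually does. Note the computed shift is never applied (see the lgtm
# comment), so every alignment is scanned; names here are hypothetical.
def bad_character_positions(text, pattern):
    n, m = len(text), len(pattern)
    positions = []
    for shift in range(n - m + 1):       # every candidate alignment
        for i in range(m - 1, -1, -1):   # compare right-to-left
            if pattern[i] != text[shift + i]:
                break
        else:                            # no mismatch: full match at `shift`
            positions.append(shift)
    return positions


assert bad_character_positions("ABAABA", "AB") == [0, 3]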
| 317
| 1
|
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
def __UpperCAmelCase ( snake_case_ : bool , snake_case_ : bool ) -> Tuple:
"""simple docstring"""
def run_func(snake_case_ : Union[str, Any] ):
@wraps(snake_case_ )
def run_in_eager_mode(*snake_case_ : Optional[int] , **snake_case_ : Union[str, Any] ):
return func(*snake_case_ , **snake_case_ )
@wraps(snake_case_ )
@tf.function(experimental_compile=snake_case_ )
def run_in_graph_mode(*snake_case_ : Dict , **snake_case_ : Union[str, Any] ):
return func(*snake_case_ , **snake_case_ )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
"""Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
def __UpperCAmelCase ( snake_case_ : int , snake_case_ : int , snake_case_ : int ) -> ["tf.Tensor"]:
"""simple docstring"""
_lowerCAmelCase = random.Random()
_lowerCAmelCase = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(snake_case_ , shape=(batch_size, sequence_length) , dtype=tf.intaa )
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = "TensorFlow"
@property
def A__ (self ):
'''simple docstring'''
return tf.__version__
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_lowerCAmelCase = self._prepare_inference_func(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return self._measure_speed(_inference )
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_lowerCAmelCase = self._prepare_train_func(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return self._measure_speed(_train )
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowerCamelCase )
_lowerCAmelCase = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_lowerCAmelCase = self._prepare_inference_func(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return self._measure_memory(_inference )
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowerCamelCase )
_lowerCAmelCase = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_lowerCAmelCase = self._prepare_train_func(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return self._measure_memory(_train )
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
_lowerCAmelCase = (
hasattr(lowerCamelCase , """architectures""" )
and isinstance(config.architectures , lowerCamelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_lowerCAmelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
_lowerCAmelCase = __import__("""transformers""" , fromlist=[model_class] )
_lowerCAmelCase = getattr(lowerCamelCase , lowerCamelCase )
_lowerCAmelCase = model_cls(lowerCamelCase )
except ImportError:
raise ImportError(
f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
_lowerCAmelCase = TF_MODEL_MAPPING[config.__class__](lowerCamelCase )
# encoder-decoder has vocab size saved differently
_lowerCAmelCase = config.vocab_size if hasattr(lowerCamelCase , """vocab_size""" ) else config.encoder.vocab_size
_lowerCAmelCase = random_input_ids(lowerCamelCase , lowerCamelCase , lowerCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(lowerCamelCase , decoder_input_ids=lowerCamelCase , training=lowerCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(lowerCamelCase , training=lowerCamelCase )
_lowerCAmelCase = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" )
if self.args.fpaa:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
_lowerCAmelCase = (
hasattr(lowerCamelCase , """architectures""" )
and isinstance(config.architectures , lowerCamelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_lowerCAmelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
_lowerCAmelCase = __import__("""transformers""" , fromlist=[model_class] )
_lowerCAmelCase = getattr(lowerCamelCase , lowerCamelCase )
_lowerCAmelCase = model_cls(lowerCamelCase )
except ImportError:
raise ImportError(
f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
_lowerCAmelCase = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](lowerCamelCase )
# encoder-decoder has vocab size saved differently
_lowerCAmelCase = config.vocab_size if hasattr(lowerCamelCase , """vocab_size""" ) else config.encoder.vocab_size
_lowerCAmelCase = random_input_ids(lowerCamelCase , lowerCamelCase , lowerCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
_lowerCAmelCase = model(lowerCamelCase , decoder_input_ids=lowerCamelCase , labels=lowerCamelCase , training=lowerCamelCase )[0]
_lowerCAmelCase = tf.gradients(lowerCamelCase , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
_lowerCAmelCase = model(lowerCamelCase , labels=lowerCamelCase , training=lowerCamelCase )[0]
_lowerCAmelCase = tf.gradients(lowerCamelCase , model.trainable_variables )
return gradients
_lowerCAmelCase = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def A__ (self , lowerCamelCase ):
'''simple docstring'''
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
                    # run the model 5 extra times first (repeat=1, number=5) to stabilize compilation for tpu
logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" )
timeit.repeat(lowerCamelCase , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
_lowerCAmelCase = timeit.repeat(
lowerCamelCase , repeat=self.args.repeat , number=10 , )
return min(lowerCamelCase ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(f"""Doesn't fit on GPU. {e}""" )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
logger.info(
"""Note that TensorFlow allocates more memory than """
"""it might need to speed up computation. """
"""The memory reported here corresponds to the memory """
"""reported by `nvidia-smi`, which can vary depending """
"""on total available memory on the GPU that is used.""" )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"""`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"""
""" consumption line by line.""" )
_lowerCAmelCase = start_memory_tracing("""transformers""" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"""Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"""
""" with `args.memory=False`""" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"""py3nvml not installed, we won't log GPU memory usage. """
"""Install py3nvml (pip install py3nvml) to log information about GPU.""" )
_lowerCAmelCase = """N/A"""
else:
logger.info(
"""Measuring total GPU usage on GPU device. Make sure to not have additional processes"""
""" running on the same GPU.""" )
# init nvml
nvml.nvmlInit()
func()
_lowerCAmelCase = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
_lowerCAmelCase = nvml.nvmlDeviceGetMemoryInfo(lowerCamelCase )
_lowerCAmelCase = meminfo.used
_lowerCAmelCase = Memory(lowerCamelCase )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"""When enabling line by line tracing, the max peak memory for CPU is inaccurate in"""
""" TensorFlow.""" )
_lowerCAmelCase = None
else:
_lowerCAmelCase = measure_peak_memory_cpu(lowerCamelCase )
_lowerCAmelCase = Memory(lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else memory_bytes
if self.args.trace_memory_line_by_line:
_lowerCAmelCase = stop_memory_tracing(lowerCamelCase )
if memory is None:
_lowerCAmelCase = summary.total
else:
_lowerCAmelCase = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(f"""Doesn't fit on GPU. {e}""" )
return "N/A", None
| 317
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
SCREAMING_SNAKE_CASE : List[str] = False
class __lowerCamelCase ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = VersatileDiffusionImageVariationPipeline.from_pretrained("""shi-labs/versatile-diffusion""" )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
_lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
_lowerCAmelCase = torch.manual_seed(0 )
_lowerCAmelCase = pipe(
image=lowerCamelCase , generator=lowerCamelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
_lowerCAmelCase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
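# Editor's note: the slice assertion above in isolation: compare a small output
# window against stored reference values with a max-abs tolerance (numpy only).
import numpy as np


def slice_close(output_slice, expected, atol=1e-2):
    diff = np.abs(np.asarray(output_slice).flatten() - np.asarray(expected).flatten())
    return float(diff.max()) < atol


assert slice_close([0.0441, 0.0469, 0.0507], [0.045, 0.047, 0.051])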
| 317
| 1
|
"""simple docstring"""
import math
def __UpperCAmelCase ( snake_case_ : int ) -> list:
"""simple docstring"""
_lowerCAmelCase = [True] * n
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = True
for i in range(3 , int(n**0.5 + 1 ) , 2 ):
_lowerCAmelCase = i * 2
while index < n:
_lowerCAmelCase = False
_lowerCAmelCase = index + i
_lowerCAmelCase = [2]
for i in range(3 , snake_case_ , 2 ):
if is_prime[i]:
primes.append(snake_case_ )
return primes
def __UpperCAmelCase ( snake_case_ : int = 999966663333 ) -> int:
"""simple docstring"""
_lowerCAmelCase = math.floor(math.sqrt(snake_case_ ) ) + 100
_lowerCAmelCase = prime_sieve(snake_case_ )
_lowerCAmelCase = 0
_lowerCAmelCase = 0
_lowerCAmelCase = primes[prime_index]
while (last_prime**2) <= limit:
_lowerCAmelCase = primes[prime_index + 1]
_lowerCAmelCase = last_prime**2
_lowerCAmelCase = next_prime**2
# Get numbers divisible by lps(current)
_lowerCAmelCase = lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
_lowerCAmelCase = upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
_lowerCAmelCase = 0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
_lowerCAmelCase = next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution())
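# Editor's note: a readable version of the odd-only sieve in prime_sieve above;
# `simple_sieve` is a hypothetical name.
def simple_sieve(n):
    """All primes strictly below n (assumes n > 2)."""
    is_prime = [True] * n
    is_prime[0] = is_prime[1] = False
    for i in range(3, int(n**0.5) + 1, 2):
        if is_prime[i]:
            for j in range(i * i, n, 2 * i):  # odd multiples of i only
                is_prime[j] = False
    return [2] + [i for i in range(3, n, 2) if is_prime[i]]


assert simple_sieve(20) == [2, 3, 5, 7, 11, 13, 17, 19]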
| 317
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __lowerCamelCase ( __lowercase , unittest.TestCase ):
__UpperCamelCase = DiTPipeline
__UpperCamelCase = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
__UpperCamelCase = PipelineTesterMixin.required_optional_params - {
'latents',
'num_images_per_prompt',
'callback',
'callback_steps',
}
__UpperCamelCase = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
__UpperCamelCase = False
def A__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=lowerCamelCase , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=1_000 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=lowerCamelCase , )
_lowerCAmelCase = AutoencoderKL()
_lowerCAmelCase = DDIMScheduler()
_lowerCAmelCase = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler}
return components
def A__ (self , lowerCamelCase , lowerCamelCase=0 ):
'''simple docstring'''
if str(lowerCamelCase ).startswith("""mps""" ):
_lowerCAmelCase = torch.manual_seed(lowerCamelCase )
else:
_lowerCAmelCase = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
_lowerCAmelCase = {
"""class_labels""": [1],
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = """cpu"""
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = self.pipeline_class(**lowerCamelCase )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
_lowerCAmelCase = self.get_dummy_inputs(lowerCamelCase )
_lowerCAmelCase = pipe(**lowerCamelCase ).images
_lowerCAmelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
_lowerCAmelCase = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
_lowerCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCamelCase , 1e-3 )
def A__ (self ):
'''simple docstring'''
self._test_inference_batch_single_identical(relax_max_difference=lowerCamelCase , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def A__ (self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class __lowerCamelCase ( unittest.TestCase ):
def A__ (self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = torch.manual_seed(0 )
_lowerCAmelCase = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" )
pipe.to("""cuda""" )
_lowerCAmelCase = ["""vase""", """umbrella""", """white shark""", """white wolf"""]
_lowerCAmelCase = pipe.get_label_ids(lowerCamelCase )
_lowerCAmelCase = pipe(lowerCamelCase , generator=lowerCamelCase , num_inference_steps=40 , output_type="""np""" ).images
for word, image in zip(lowerCamelCase , lowerCamelCase ):
_lowerCAmelCase = load_numpy(
f"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" )
assert np.abs((expected_image - image).max() ) < 1e-2
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" )
_lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("""cuda""" )
_lowerCAmelCase = ["""vase""", """umbrella"""]
_lowerCAmelCase = pipe.get_label_ids(lowerCamelCase )
_lowerCAmelCase = torch.manual_seed(0 )
_lowerCAmelCase = pipe(lowerCamelCase , generator=lowerCamelCase , num_inference_steps=25 , output_type="""np""" ).images
for word, image in zip(lowerCamelCase , lowerCamelCase ):
_lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
f"""/dit/{word}_512.npy""" )
assert np.abs((expected_image - image).max() ) < 1e-1
| 317
| 1
|
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser(
description=(
        '''Extracting some layers of the full BertForMaskedLM or RobertaForMaskedLM for Transfer Learned'''
        ''' Distillation'''
)
)
parser.add_argument('''--model_type''', default='''bert''', choices=['''bert'''])
parser.add_argument('''--model_name''', default='''bert-base-uncased''', type=str)
parser.add_argument('''--dump_checkpoint''', default='''serialization_dir/tf_bert-base-uncased_0247911.pth''', type=str)
parser.add_argument('''--vocab_transform''', action='''store_true''')
SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args()
if args.model_type == "bert":
SCREAMING_SNAKE_CASE : Tuple = BertForMaskedLM.from_pretrained(args.model_name)
SCREAMING_SNAKE_CASE : Union[str, Any] = '''bert'''
else:
raise ValueError('''args.model_type should be "bert".''')
SCREAMING_SNAKE_CASE : Dict = model.state_dict()
SCREAMING_SNAKE_CASE : str = {}
for w in ["word_embeddings", "position_embeddings"]:
SCREAMING_SNAKE_CASE : int = state_dict[F'{prefix}.embeddings.{w}.weight']
for w in ["weight", "bias"]:
SCREAMING_SNAKE_CASE : List[str] = state_dict[F'{prefix}.embeddings.LayerNorm.{w}']
SCREAMING_SNAKE_CASE : Dict = 0
for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
for w in ["weight", "bias"]:
SCREAMING_SNAKE_CASE : Dict = state_dict[
F'{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'
]
SCREAMING_SNAKE_CASE : Tuple = state_dict[
F'{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'
]
SCREAMING_SNAKE_CASE : Any = state_dict[
F'{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'
]
SCREAMING_SNAKE_CASE : Optional[Any] = state_dict[
F'{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'
]
SCREAMING_SNAKE_CASE : List[str] = state_dict[
F'{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'
]
SCREAMING_SNAKE_CASE : Optional[Any] = state_dict[
F'{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'
]
SCREAMING_SNAKE_CASE : List[str] = state_dict[
F'{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'
]
SCREAMING_SNAKE_CASE : Dict = state_dict[
F'{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'
]
std_idx += 1
SCREAMING_SNAKE_CASE : Tuple = state_dict['''cls.predictions.decoder.weight''']
SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict['''cls.predictions.bias''']
if args.vocab_transform:
for w in ["weight", "bias"]:
SCREAMING_SNAKE_CASE : Any = state_dict[F'cls.predictions.transform.dense.{w}']
SCREAMING_SNAKE_CASE : int = state_dict[F'cls.predictions.transform.LayerNorm.{w}']
print(F'N layers selected for distillation: {std_idx}')
print(F'Number of params transferred for distillation: {len(compressed_sd.keys())}')
print(F'Save transferred checkpoint to {args.dump_checkpoint}.')
torch.save(compressed_sd, args.dump_checkpoint)
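# Editor's note: the heart of the extraction above is remapping selected
# teacher layers to consecutive student indices; a dict-only sketch using the
# hypothetical key scheme "<prefix>.encoder.layer.<i>.".
def select_layers(state_dict, teacher_layers, prefix="bert"):
    compressed = {}
    for std_idx, teacher_idx in enumerate(teacher_layers):
        old = f"{prefix}.encoder.layer.{teacher_idx}."
        new = f"{prefix}.encoder.layer.{std_idx}."
        for key, value in state_dict.items():
            if key.startswith(old):
                compressed[new + key[len(old):]] = value
    return compressed


# usage: select_layers(model.state_dict(), [0, 2, 4, 7, 9, 11])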
| 317
|
"""simple docstring"""
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def __UpperCAmelCase ( snake_case_ : Union[str, Any] ) -> Dict:
"""simple docstring"""
return getitem, k
def __UpperCAmelCase ( snake_case_ : Dict , snake_case_ : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return setitem, k, v
def __UpperCAmelCase ( snake_case_ : str ) -> Optional[int]:
"""simple docstring"""
return delitem, k
def __UpperCAmelCase ( snake_case_ : Optional[Any] , snake_case_ : Tuple , *snake_case_ : Tuple ) -> str:
"""simple docstring"""
try:
return fun(snake_case_ , *snake_case_ ), None
except Exception as e:
return None, e
SCREAMING_SNAKE_CASE : int = (
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
)
SCREAMING_SNAKE_CASE : List[Any] = [
_set('''key_a''', '''val_a'''),
_set('''key_a''', '''val_b'''),
]
SCREAMING_SNAKE_CASE : Any = [
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
_del('''key_a'''),
_del('''key_b'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
]
SCREAMING_SNAKE_CASE : Union[str, Any] = [
_get('''key_a'''),
_del('''key_a'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
_del('''key_a'''),
_get('''key_a'''),
]
SCREAMING_SNAKE_CASE : Optional[Any] = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
SCREAMING_SNAKE_CASE : Optional[int] = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('''key_a''', '''val_b'''),
]
@pytest.mark.parametrize(
"""operations""" , (
pytest.param(_add_items , id="""add items""" ),
pytest.param(_overwrite_items , id="""overwrite items""" ),
pytest.param(_delete_items , id="""delete items""" ),
pytest.param(_access_absent_items , id="""access absent items""" ),
pytest.param(_add_with_resize_up , id="""add with resize up""" ),
pytest.param(_add_with_resize_down , id="""add with resize down""" ),
) , )
def __UpperCAmelCase ( snake_case_ : List[Any] ) -> Tuple:
"""simple docstring"""
_lowerCAmelCase = HashMap(initial_block_size=4 )
_lowerCAmelCase = {}
for _, (fun, *args) in enumerate(snake_case_ ):
_lowerCAmelCase , _lowerCAmelCase = _run_operation(snake_case_ , snake_case_ , *snake_case_ )
_lowerCAmelCase , _lowerCAmelCase = _run_operation(snake_case_ , snake_case_ , *snake_case_ )
assert my_res == py_res
assert str(snake_case_ ) == str(snake_case_ )
assert set(snake_case_ ) == set(snake_case_ )
assert len(snake_case_ ) == len(snake_case_ )
assert set(my.items() ) == set(py.items() )
def __UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
    def is_public(snake_case_ : str ) -> bool:
        return not snake_case_.startswith("""_""" )
_lowerCAmelCase = {name for name in dir({} ) if is_public(snake_case_ )}
_lowerCAmelCase = {name for name in dir(HashMap() ) if is_public(snake_case_ )}
assert dict_public_names > hash_public_names
| 317
| 1
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class __lowerCamelCase :
__UpperCamelCase = LEDConfig
__UpperCamelCase = {}
__UpperCamelCase = 'gelu'
def __init__(self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=99 , lowerCamelCase=32 , lowerCamelCase=2 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=20 , lowerCamelCase=2 , lowerCamelCase=1 , lowerCamelCase=0 , lowerCamelCase=4 , ):
'''simple docstring'''
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = seq_length
_lowerCAmelCase = is_training
_lowerCAmelCase = use_labels
_lowerCAmelCase = vocab_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = eos_token_id
_lowerCAmelCase = pad_token_id
_lowerCAmelCase = bos_token_id
_lowerCAmelCase = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
_lowerCAmelCase = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
_lowerCAmelCase = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_lowerCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_lowerCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
_lowerCAmelCase = prepare_led_inputs_dict(lowerCamelCase , lowerCamelCase , lowerCamelCase )
_lowerCAmelCase = tf.concat(
[tf.zeros_like(lowerCamelCase )[:, :-1], tf.ones_like(lowerCamelCase )[:, -1:]] , axis=-1 , )
_lowerCAmelCase = global_attention_mask
return config, inputs_dict
def A__ (self , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = TFLEDModel(config=lowerCamelCase ).get_decoder()
_lowerCAmelCase = inputs_dict["""input_ids"""]
_lowerCAmelCase = input_ids[:1, :]
_lowerCAmelCase = inputs_dict["""attention_mask"""][:1, :]
_lowerCAmelCase = 1
# first forward pass
_lowerCAmelCase = model(lowerCamelCase , attention_mask=lowerCamelCase , use_cache=lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
_lowerCAmelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
_lowerCAmelCase = tf.concat([input_ids, next_tokens] , axis=-1 )
_lowerCAmelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_lowerCAmelCase = model(lowerCamelCase , attention_mask=lowerCamelCase )[0]
_lowerCAmelCase = model(lowerCamelCase , attention_mask=lowerCamelCase , past_key_values=lowerCamelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_lowerCAmelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx]
_lowerCAmelCase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowerCamelCase , lowerCamelCase , rtol=1e-3 )
def __UpperCAmelCase ( snake_case_ : List[Any] , snake_case_ : List[Any] , snake_case_ : str , snake_case_ : Optional[Any]=None , snake_case_ : Optional[int]=None , snake_case_ : Any=None , snake_case_ : List[Any]=None , ) -> Optional[Any]:
"""simple docstring"""
if attention_mask is None:
_lowerCAmelCase = tf.cast(tf.math.not_equal(snake_case_ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
_lowerCAmelCase = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
_lowerCAmelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_lowerCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class __lowerCamelCase ( __lowercase , __lowercase , unittest.TestCase ):
__UpperCamelCase = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
__UpperCamelCase = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
__UpperCamelCase = (
{
'conversational': TFLEDForConditionalGeneration,
'feature-extraction': TFLEDModel,
'summarization': TFLEDForConditionalGeneration,
'text2text-generation': TFLEDForConditionalGeneration,
'translation': TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
__UpperCamelCase = True
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = TFLEDModelTester(self )
_lowerCAmelCase = ConfigTester(self , config_class=lowerCamelCase )
def A__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowerCamelCase )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase = tf.zeros_like(inputs_dict["""attention_mask"""] )
_lowerCAmelCase = 2
_lowerCAmelCase = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["""global_attention_mask"""] , )
_lowerCAmelCase = True
_lowerCAmelCase = self.model_tester.seq_length
_lowerCAmelCase = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(lowerCamelCase ):
_lowerCAmelCase = outputs.decoder_attentions
self.assertEqual(len(lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(lowerCamelCase ):
_lowerCAmelCase = [t.numpy() for t in outputs.encoder_attentions]
_lowerCAmelCase = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
_lowerCAmelCase = True
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = model_class(lowerCamelCase )
_lowerCAmelCase = model(self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
_lowerCAmelCase = len(lowerCamelCase )
self.assertEqual(config.output_hidden_states , lowerCamelCase )
check_encoder_attentions_output(lowerCamelCase )
if self.is_encoder_decoder:
_lowerCAmelCase = model_class(lowerCamelCase )
_lowerCAmelCase = model(self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
self.assertEqual(config.output_hidden_states , lowerCamelCase )
check_decoder_attentions_output(lowerCamelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_lowerCAmelCase = True
_lowerCAmelCase = model_class(lowerCamelCase )
_lowerCAmelCase = model(self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
self.assertEqual(config.output_hidden_states , lowerCamelCase )
check_encoder_attentions_output(lowerCamelCase )
# Check attention is always last and order is fine
_lowerCAmelCase = True
_lowerCAmelCase = True
_lowerCAmelCase = model_class(lowerCamelCase )
_lowerCAmelCase = model(self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowerCamelCase ) )
self.assertEqual(model.config.output_hidden_states , lowerCamelCase )
check_encoder_attentions_output(lowerCamelCase )
@unittest.skip("""LED keeps using potentially symbolic tensors in conditionals and breaks tracing.""" )
def A__ (self ):
'''simple docstring'''
pass
def A__ (self ):
'''simple docstring'''
pass
def __UpperCAmelCase ( snake_case_ : Dict ) -> int:
"""simple docstring"""
return tf.constant(snake_case_ , dtype=tf.intaa )
SCREAMING_SNAKE_CASE : Optional[int] = 1e-4
@slow
@require_tf
class __lowerCamelCase ( unittest.TestCase ):
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = TFLEDForConditionalGeneration.from_pretrained("""allenai/led-base-16384""" ).led
# change to intended input here
_lowerCAmelCase = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_lowerCAmelCase = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_lowerCAmelCase = prepare_led_inputs_dict(model.config , lowerCamelCase , lowerCamelCase )
_lowerCAmelCase = model(**lowerCamelCase )[0]
_lowerCAmelCase = (1, 1_024, 768)
self.assertEqual(output.shape , lowerCamelCase )
# change to expected output here
_lowerCAmelCase = tf.convert_to_tensor(
[[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowerCamelCase , atol=1e-3 )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = TFLEDForConditionalGeneration.from_pretrained("""allenai/led-base-16384""" )
# change to intended input here
_lowerCAmelCase = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_lowerCAmelCase = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_lowerCAmelCase = prepare_led_inputs_dict(model.config , lowerCamelCase , lowerCamelCase )
_lowerCAmelCase = model(**lowerCamelCase )[0]
_lowerCAmelCase = (1, 1_024, model.config.vocab_size)
self.assertEqual(output.shape , lowerCamelCase )
# change to expected output here
_lowerCAmelCase = tf.convert_to_tensor(
[[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowerCamelCase , atol=1e-3 , rtol=1e-3 )
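# Editor's note: the encoder_seq_length computation in the model tester above
# pads seq_length up to the next multiple of attention_window; the arithmetic
# in isolation (`pad_to_window` is a hypothetical name):
def pad_to_window(seq_length, window):
    return seq_length + (window - seq_length % window) % window


assert pad_to_window(7, 4) == 8 and pad_to_window(8, 4) == 8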
| 317
|
"""simple docstring"""
def __UpperCAmelCase ( snake_case_ : int , snake_case_ : list[int] , snake_case_ : int ) -> int:
"""simple docstring"""
def count_of_possible_combinations(snake_case_ : int ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(snake_case_ )
def __UpperCAmelCase ( snake_case_ : int , snake_case_ : list[int] , snake_case_ : int ) -> int:
"""simple docstring"""
def count_of_possible_combinations_with_dp_array(
snake_case_ : int , snake_case_ : list[int] ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
_lowerCAmelCase = sum(
count_of_possible_combinations_with_dp_array(target - item , snake_case_ )
for item in array )
_lowerCAmelCase = answer
return answer
_lowerCAmelCase = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(snake_case_ , snake_case_ )
def __UpperCAmelCase ( snake_case_ : int , snake_case_ : list[int] , snake_case_ : int ) -> int:
"""simple docstring"""
_lowerCAmelCase = [0] * (target + 1)
_lowerCAmelCase = 1
for i in range(1 , target + 1 ):
for j in range(snake_case_ ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE : Tuple = 3
SCREAMING_SNAKE_CASE : Any = 5
SCREAMING_SNAKE_CASE : Optional[int] = [1, 2, 5]
print(combination_sum_iv(n, array, target))
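# Editor's note: the bottom-up variant above with descriptive names; it counts
# ordered sequences (1+2 and 2+1 are distinct), so [1, 2, 5] with target 5
# yields 9.
def combination_sum_iv_clean(array, target):
    dp = [0] * (target + 1)
    dp[0] = 1  # one way to reach 0: the empty sequence
    for total in range(1, target + 1):
        for item in array:
            if total - item >= 0:
                dp[total] += dp[total - item]
    return dp[target]


assert combination_sum_iv_clean([1, 2, 5], 5) == 9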
| 317
| 1
|
"""simple docstring"""
def __UpperCAmelCase ( snake_case_ : int = 100 ) -> int:
"""simple docstring"""
_lowerCAmelCase = n * (n + 1) * (2 * n + 1) / 6
_lowerCAmelCase = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(F'{solution() = }')
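# Editor's note: the same closed forms kept in integer arithmetic, avoiding the
# float round-trip through int() above; both quantities are exact integers.
def solution_int(n=100):
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    square_of_sum = (n * (n + 1) // 2) ** 2
    return square_of_sum - sum_of_squares


assert solution_int(10) == 2640  # 3025 - 385, the problem's own example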
| 317
|
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
SCREAMING_SNAKE_CASE : str = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
SCREAMING_SNAKE_CASE : list[int] = [ord(letter) for letter in string.ascii_lowercase]
SCREAMING_SNAKE_CASE : set[int] = {ord(char) for char in VALID_CHARS}
SCREAMING_SNAKE_CASE : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def __UpperCAmelCase ( snake_case_ : list[int] , snake_case_ : tuple[int, ...] ) -> str | None:
"""simple docstring"""
_lowerCAmelCase = ""
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = 42
for keychar, cipherchar in zip(cycle(snake_case_ ) , snake_case_ ):
_lowerCAmelCase = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(snake_case_ )
return decoded
def __UpperCAmelCase ( snake_case_ : list[int] ) -> list[str]:
"""simple docstring"""
_lowerCAmelCase = []
for key in product(snake_case_ , repeat=3 ):
_lowerCAmelCase = try_key(snake_case_ , snake_case_ )
if encoded is not None:
possibles.append(snake_case_ )
return possibles
def __UpperCAmelCase ( snake_case_ : list[str] , snake_case_ : str ) -> list[str]:
"""simple docstring"""
return [possible for possible in possibles if common_word in possible.lower()]
def __UpperCAmelCase ( snake_case_ : str = "p059_cipher.txt" ) -> int:
"""simple docstring"""
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = Path(snake_case_ ).parent.joinpath(snake_case_ ).read_text(encoding="""utf-8""" )
_lowerCAmelCase = [int(snake_case_ ) for number in data.strip().split(""",""" )]
_lowerCAmelCase = filter_valid_chars(snake_case_ )
for common_word in COMMON_WORDS:
_lowerCAmelCase = filter_common_word(snake_case_ , snake_case_ )
if len(snake_case_ ) == 1:
break
_lowerCAmelCase = possibles[0]
return sum(ord(snake_case_ ) for char in decoded_text )
if __name__ == "__main__":
print(F'{solution() = }')
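# Editor's note: try_key above, reduced to its essence: XOR the ciphertext
# against the cycled key (hypothetical name; character validation omitted).
from itertools import cycle


def xor_decode(cipher, key):
    return "".join(chr(c ^ k) for c, k in zip(cipher, cycle(key)))


_secret = [c ^ k for c, k in zip(map(ord, "hello"), cycle(map(ord, "key")))]
assert xor_decode(_secret, [ord(ch) for ch in "key"]) == "hello"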
| 317
| 1
|
"""simple docstring"""
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = 'new-model'
if is_tf_available():
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = NewModelConfig
@require_tf
class __lowerCamelCase ( unittest.TestCase ):
@slow
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = """bert-base-cased"""
_lowerCAmelCase = AutoConfig.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
_lowerCAmelCase = TFAutoModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
@slow
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = """bert-base-cased"""
_lowerCAmelCase = AutoConfig.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
_lowerCAmelCase = TFAutoModelForPreTraining.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
@slow
def A__ (self ):
'''simple docstring'''
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase = AutoConfig.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
_lowerCAmelCase = TFAutoModelForCausalLM.from_pretrained(lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase = TFAutoModelForCausalLM.from_pretrained(lowerCamelCase , output_loading_info=lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
@slow
def A__ (self ):
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase = AutoConfig.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
_lowerCAmelCase = TFAutoModelWithLMHead.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
@slow
def A__ (self ):
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase = AutoConfig.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
_lowerCAmelCase = TFAutoModelForMaskedLM.from_pretrained(lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase = TFAutoModelForMaskedLM.from_pretrained(lowerCamelCase , output_loading_info=lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
@slow
def A__ (self ):
'''simple docstring'''
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase = AutoConfig.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
_lowerCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained(lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained(lowerCamelCase , output_loading_info=lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
@slow
def A__ (self ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
_lowerCAmelCase = AutoConfig.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
_lowerCAmelCase = TFAutoModelForSequenceClassification.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
@slow
def A__ (self ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
_lowerCAmelCase = AutoConfig.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
_lowerCAmelCase = TFAutoModelForQuestionAnswering.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
@slow
@require_tensorflow_probability
def A__ (self ):
'''simple docstring'''
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
_lowerCAmelCase = AutoConfig.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
_lowerCAmelCase = TFAutoModelForTableQuestionAnswering.from_pretrained(lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase = TFAutoModelForTableQuestionAnswering.from_pretrained(
lowerCamelCase , output_loading_info=lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = TFAutoModelWithLMHead.from_pretrained(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowerCamelCase ) , 14_410 )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = TFAutoModelWithLMHead.from_pretrained(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowerCamelCase ) , 14_410 )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = TFAutoModel.from_pretrained("""sgugger/funnel-random-tiny""" )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
_lowerCAmelCase = copy.deepcopy(model.config )
_lowerCAmelCase = ["""FunnelBaseModel"""]
_lowerCAmelCase = TFAutoModel.from_config(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowerCamelCase )
_lowerCAmelCase = TFAutoModel.from_pretrained(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
def A__ (self ):
'''simple docstring'''
try:
AutoConfig.register("""new-model""" , lowerCamelCase )
_lowerCAmelCase = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(lowerCamelCase ):
auto_class.register(lowerCamelCase , lowerCamelCase )
auto_class.register(lowerCamelCase , lowerCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCamelCase ):
auto_class.register(lowerCamelCase , lowerCamelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
_lowerCAmelCase = BertModelTester(self ).get_config()
_lowerCAmelCase = NewModelConfig(**tiny_config.to_dict() )
_lowerCAmelCase = auto_class.from_config(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowerCamelCase )
_lowerCAmelCase = auto_class.from_pretrained(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def A__ (self ):
'''simple docstring'''
with self.assertRaisesRegex(
lowerCamelCase , """bert-base is not a local folder and is not a valid model identifier""" ):
_lowerCAmelCase = TFAutoModel.from_pretrained("""bert-base""" )
def A__ (self ):
'''simple docstring'''
with self.assertRaisesRegex(
lowerCamelCase , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
_lowerCAmelCase = TFAutoModel.from_pretrained(lowerCamelCase , revision="""aaaaaa""" )
def A__ (self ):
'''simple docstring'''
with self.assertRaisesRegex(
lowerCamelCase , """hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin""" , ):
_lowerCAmelCase = TFAutoModel.from_pretrained("""hf-internal-testing/config-no-model""" )
def A__ (self ):
'''simple docstring'''
with self.assertRaisesRegex(lowerCamelCase , """Use `from_pt=True` to load this model""" ):
_lowerCAmelCase = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-bert-pt-only""" )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
with RequestCounter() as counter:
_lowerCAmelCase = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
_lowerCAmelCase = TFAutoModel.from_pretrained("""ArthurZ/tiny-random-bert-sharded""" )
with RequestCounter() as counter:
_lowerCAmelCase = TFAutoModel.from_pretrained("""ArthurZ/tiny-random-bert-sharded""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
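# Editor's note: the register/cleanup dance in the try/finally above, reduced
# to a generic registry pattern in plain Python (not the transformers API).
class Registry:
    def __init__(self):
        self._extra_content = {}

    def register(self, key, value):
        if key in self._extra_content:
            raise ValueError(f"{key!r} is already registered")
        self._extra_content[key] = value

    def unregister(self, key):
        self._extra_content.pop(key, None)


_reg = Registry()
try:
    _reg.register("new-model", object)
    _reg.register("new-model", object)  # duplicate registration raises
except ValueError:
    pass
finally:
    _reg.unregister("new-model")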
| 317
|
"""simple docstring"""
def __UpperCAmelCase ( snake_case_ : int = 1000000 ) -> int:
"""simple docstring"""
_lowerCAmelCase = limit + 1
_lowerCAmelCase = [0] * limit
for first_term in range(1 , snake_case_ ):
for n in range(snake_case_ , snake_case_ , snake_case_ ):
_lowerCAmelCase = first_term + n / first_term
        if common_difference % 4: # d must be divisible by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
frequency[n] += 1 # so z>0 and a>d ,also 4d<a
_lowerCAmelCase = sum(1 for x in frequency[1:limit] if x == 10 )
return count
if __name__ == "__main__":
print(F'{solution() = }')
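# Editor's note: a brute-force cross-check of the counting above for small
# limits, enumerating arithmetic progressions x > y > z > 0 directly;
# n = (z+2d)^2 - (z+d)^2 - z^2 = 3d^2 + 2zd - z^2, positive iff z < 3d.
from collections import Counter


def brute_force_frequency(limit):
    freq = Counter()
    for d in range(1, limit):
        for z in range(1, 3 * d):
            n = 3 * d * d + 2 * z * d - z * z
            if 0 < n < limit:
                freq[n] += 1
    return freq


assert brute_force_frequency(30)[27] == 2  # n = 27 has exactly two solutions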
| 317
| 1
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class __lowerCamelCase ( __lowercase , __lowercase , unittest.TestCase ):
__UpperCamelCase = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
__UpperCamelCase = (
{
'feature-extraction': TFMobileBertModel,
'fill-mask': TFMobileBertForMaskedLM,
'question-answering': TFMobileBertForQuestionAnswering,
'text-classification': TFMobileBertForSequenceClassification,
'token-classification': TFMobileBertForTokenClassification,
'zero-shot': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__UpperCamelCase = False
__UpperCamelCase = False
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase=False ):
'''simple docstring'''
_lowerCAmelCase = super()._prepare_for_class(lowerCamelCase , lowerCamelCase , return_labels=lowerCamelCase )
if return_labels:
if model_class in get_values(lowerCamelCase ):
_lowerCAmelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
return inputs_dict
class __lowerCamelCase ( __lowercase ):
def __init__(self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=99 , lowerCamelCase=32 , lowerCamelCase=32 , lowerCamelCase=2 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=512 , lowerCamelCase=16 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=3 , lowerCamelCase=4 , lowerCamelCase=None , ):
'''simple docstring'''
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = seq_length
_lowerCAmelCase = is_training
_lowerCAmelCase = use_input_mask
_lowerCAmelCase = use_token_type_ids
_lowerCAmelCase = use_labels
_lowerCAmelCase = vocab_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_act
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = type_vocab_size
_lowerCAmelCase = type_sequence_label_size
_lowerCAmelCase = initializer_range
_lowerCAmelCase = num_labels
_lowerCAmelCase = num_choices
_lowerCAmelCase = scope
_lowerCAmelCase = embedding_size
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase = None
if self.use_input_mask:
_lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase = None
if self.use_token_type_ids:
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
if self.use_labels:
_lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = TFMobileBertModel(config=lowerCamelCase )
_lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_lowerCAmelCase = model(lowerCamelCase )
_lowerCAmelCase = [input_ids, input_mask]
_lowerCAmelCase = model(lowerCamelCase )
_lowerCAmelCase = model(lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = TFMobileBertForMaskedLM(config=lowerCamelCase )
_lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_lowerCAmelCase = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = TFMobileBertForNextSentencePrediction(config=lowerCamelCase )
_lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_lowerCAmelCase = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = TFMobileBertForPreTraining(config=lowerCamelCase )
_lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_lowerCAmelCase = model(lowerCamelCase )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.num_labels
_lowerCAmelCase = TFMobileBertForSequenceClassification(config=lowerCamelCase )
_lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_lowerCAmelCase = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.num_choices
_lowerCAmelCase = TFMobileBertForMultipleChoice(config=lowerCamelCase )
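# tile each input from (batch_size, seq_length) to (batch_size, num_choices, seq_length) along a new choice axis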
_lowerCAmelCase = tf.tile(tf.expand_dims(lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
_lowerCAmelCase = tf.tile(tf.expand_dims(lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
_lowerCAmelCase = tf.tile(tf.expand_dims(lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
_lowerCAmelCase = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
_lowerCAmelCase = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.num_labels
_lowerCAmelCase = TFMobileBertForTokenClassification(config=lowerCamelCase )
_lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_lowerCAmelCase = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = TFMobileBertForQuestionAnswering(config=lowerCamelCase )
_lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_lowerCAmelCase = model(lowerCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A__ (self ):
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels = config_and_inputs
inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = TFMobileBertModelTest.TFMobileBertModelTester(self )
_lowerCAmelCase = ConfigTester(self , config_class=lowerCamelCase , hidden_size=37 )
def A__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*lowerCamelCase )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowerCamelCase )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowerCamelCase )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowerCamelCase )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*lowerCamelCase )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*lowerCamelCase )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowerCamelCase )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*lowerCamelCase )
@slow
def A__ (self ):
'''simple docstring'''
for model_name in ["google/mobilebert-uncased"]:
_lowerCAmelCase = TFMobileBertModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
@require_tf
class __lowerCamelCase ( unittest.TestCase ):
@slow
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = TFMobileBertForPreTraining.from_pretrained("""google/mobilebert-uncased""" )
_lowerCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
_lowerCAmelCase = model(lowerCamelCase )[0]
_lowerCAmelCase = [1, 6, 30_522]
self.assertEqual(output.shape , lowerCamelCase )
_lowerCAmelCase = tf.constant(
[
[
[-4.591_9547, -9.24_8295, -9.64_5256],
[-6.730_6175, -6.44_0284, -6.605_2837],
[-7.274_3506, -6.784_7915, -6.02_4673],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , lowerCamelCase , atol=1e-4 )
| 317
|
"""simple docstring"""
from functools import reduce
SCREAMING_SNAKE_CASE : str = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def solution( snake_case_ : str = SCREAMING_SNAKE_CASE ) -> int:
    """simple docstring"""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x , y : str(int(x ) * int(y ) ) , snake_case_[i : i + 13] ) )
        for i in range(len(snake_case_ ) - 12 ) )
if __name__ == "__main__":
print(F'{solution() = }')
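# A minimal cross-check of the reduce-based solution above (an illustrative
# reimplementation, not part of the original): the same maximum computed with a
# plain sliding-window loop over every 13-digit window of the 1000-digit string.
def _windowed_max_product(digits: str, width: int = 13) -> int:
    best = 0
    for i in range(len(digits) - width + 1):
        product = 1
        for ch in digits[i : i + width]:
            product *= int(ch)
        best = max(best, product)
    return best

# e.g. _windowed_max_product(SCREAMING_SNAKE_CASE) == solution() should hold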
| 317
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class __lowerCamelCase ( unittest.TestCase ):
def __init__(self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=99 , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=512 , lowerCamelCase=16 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=4 , ):
'''simple docstring'''
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = seq_length
_lowerCAmelCase = is_training
_lowerCAmelCase = use_attention_mask
_lowerCAmelCase = use_token_type_ids
_lowerCAmelCase = use_labels
_lowerCAmelCase = vocab_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_act
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = type_vocab_size
_lowerCAmelCase = type_sequence_label_size
_lowerCAmelCase = initializer_range
_lowerCAmelCase = num_choices
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase = None
if self.use_attention_mask:
_lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase = None
if self.use_token_type_ids:
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCAmelCase = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def A__ (self ):
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
config , input_ids , token_type_ids , attention_mask = config_and_inputs
inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def A__ (self ):
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
config , input_ids , token_type_ids , attention_mask = config_and_inputs
config.is_decoder = True
encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class __lowerCamelCase ( __lowercase , unittest.TestCase ):
__UpperCamelCase = True
__UpperCamelCase = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = FlaxBertModelTester(self )
@slow
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = FlaxBertModel.from_pretrained("""bert-base-cased""" )
_lowerCAmelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase )
| 317
|
"""simple docstring"""
def solution( snake_case_ : int = 600851475143 ) -> int:
    """simple docstring"""
    try:
        n = int(snake_case_ )
    except (TypeError, ValueError):
        raise TypeError("""Parameter n must be int or castable to int.""" )
    if n <= 0:
        raise ValueError("""Parameter n must be greater than or equal to one.""" )
    ans = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    if n > 1:
        ans = n
    return int(ans )
if __name__ == "__main__":
print(F'{solution() = }')
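# Quick sanity checks for the trial-division routine above (illustrative, not part
# of the original script): 13195 = 5 * 7 * 13 * 29, so its largest prime factor is
# 29, and a prime input is its own largest prime factor.
assert solution(13195 ) == 29
assert solution(17 ) == 17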
| 317
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Dict = {
'''alibaba-damo/mgp-str-base''': '''https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json''',
}
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = 'mgp-str'
def __init__(self , lowerCamelCase=[32, 128] , lowerCamelCase=4 , lowerCamelCase=3 , lowerCamelCase=27 , lowerCamelCase=38 , lowerCamelCase=50_257 , lowerCamelCase=30_522 , lowerCamelCase=768 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=4.0 , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=1e-5 , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=False , lowerCamelCase=0.02 , **lowerCamelCase , ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
_lowerCAmelCase = image_size
_lowerCAmelCase = patch_size
_lowerCAmelCase = num_channels
_lowerCAmelCase = max_token_length
_lowerCAmelCase = num_character_labels
_lowerCAmelCase = num_bpe_labels
_lowerCAmelCase = num_wordpiece_labels
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = mlp_ratio
_lowerCAmelCase = distilled
_lowerCAmelCase = layer_norm_eps
_lowerCAmelCase = drop_rate
_lowerCAmelCase = qkv_bias
_lowerCAmelCase = attn_drop_rate
_lowerCAmelCase = drop_path_rate
_lowerCAmelCase = output_a3_attentions
_lowerCAmelCase = initializer_range
| 317
|
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
Seq2SeqDataCollator,
Seq2SeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
SCREAMING_SNAKE_CASE : Optional[Any] = logging.getLogger(__name__)
@dataclass
class __lowerCamelCase :
__UpperCamelCase = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
__UpperCamelCase = field(
default=__lowercase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__UpperCamelCase = field(
default=__lowercase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
__UpperCamelCase = field(
default=__lowercase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
__UpperCamelCase = field(default=__lowercase , metadata={'help': 'Whether to freeze the encoder.'} )
__UpperCamelCase = field(default=__lowercase , metadata={'help': 'Whether to freeze the embeddings.'} )
@dataclass
class __lowerCamelCase :
__UpperCamelCase = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
__UpperCamelCase = field(
default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , )
__UpperCamelCase = field(
default=1_024 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__UpperCamelCase = field(
default=128 , metadata={
'help': (
'The maximum total sequence length for target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__UpperCamelCase = field(
default=142 , metadata={
'help': (
'The maximum total sequence length for validation target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded. '
'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
'during ``evaluate`` and ``predict``.'
)
} , )
__UpperCamelCase = field(
default=142 , metadata={
'help': (
'The maximum total sequence length for test target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__UpperCamelCase = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} )
__UpperCamelCase = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} )
__UpperCamelCase = field(default=-1 , metadata={'help': '# test examples. -1 means use all.'} )
__UpperCamelCase = field(default=__lowercase , metadata={'help': 'Source language id for translation.'} )
__UpperCamelCase = field(default=__lowercase , metadata={'help': 'Target language id for translation.'} )
__UpperCamelCase = field(default=__lowercase , metadata={'help': '# num_beams to use for evaluation.'} )
__UpperCamelCase = field(
default=__lowercase , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , )
def handle_metrics( split , metrics , output_dir ):
    """simple docstring"""
    logger.info(F"""***** {split} metrics *****""" )
    for key in sorted(metrics.keys() ):
        logger.info(F""" {key} = {metrics[key]}""" )
    save_json(metrics , os.path.join(output_dir , F"""{split}_results.json""" ) )
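# Illustrative call (hypothetical values, not in the original): logs each metric
# under a "***** val metrics *****" banner and writes <output_dir>/val_results.json:
#   handle_metrics("val", {"val_loss": 1.2345}, training_args.output_dir)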
def main():
"""simple docstring"""
_lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = parser.parse_args_into_dataclasses()
check_output_dir(snake_case_ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("""Training/evaluation parameters %s""" , snake_case_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowerCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_lowerCAmelCase = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""")
for p in extra_model_params:
if getattr(snake_case_ , snake_case_ , snake_case_ ):
assert hasattr(snake_case_ , snake_case_ ), F"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
setattr(snake_case_ , snake_case_ , getattr(snake_case_ , snake_case_ ) )
_lowerCAmelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_lowerCAmelCase = AutoModelForSeq2SeqLM.from_pretrained(
model_args.model_name_or_path , from_tf=""".ckpt""" in model_args.model_name_or_path , config=snake_case_ , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(snake_case_ , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
_lowerCAmelCase = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(snake_case_ , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(snake_case_ , snake_case_ ):
_lowerCAmelCase = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
_lowerCAmelCase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(snake_case_ )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
_lowerCAmelCase = Seq2SeqDataset
# Get datasets
_lowerCAmelCase = (
dataset_class(
snake_case_ , type_path="""train""" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_train
else None
)
_lowerCAmelCase = (
dataset_class(
snake_case_ , type_path="""val""" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
_lowerCAmelCase = (
dataset_class(
snake_case_ , type_path="""test""" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
_lowerCAmelCase = (
build_compute_metrics_fn(data_args.task , snake_case_ ) if training_args.predict_with_generate else None
)
_lowerCAmelCase = Seq2SeqTrainer(
model=snake_case_ , args=snake_case_ , data_args=snake_case_ , train_dataset=snake_case_ , eval_dataset=snake_case_ , data_collator=Seq2SeqDataCollator(
snake_case_ , snake_case_ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=snake_case_ , tokenizer=snake_case_ , )
_lowerCAmelCase = {}
# Training
if training_args.do_train:
logger.info("""*** Train ***""" )
_lowerCAmelCase = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
_lowerCAmelCase = train_result.metrics
_lowerCAmelCase = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("""train""" , snake_case_ , training_args.output_dir )
all_metrics.update(snake_case_ )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
_lowerCAmelCase = trainer.evaluate(metric_key_prefix="""val""" )
_lowerCAmelCase = data_args.n_val
_lowerCAmelCase = round(metrics["""val_loss"""] , 4 )
if trainer.is_world_process_zero():
handle_metrics("""val""" , snake_case_ , training_args.output_dir )
all_metrics.update(snake_case_ )
if training_args.do_predict:
logger.info("""*** Predict ***""" )
_lowerCAmelCase = trainer.predict(test_dataset=snake_case_ , metric_key_prefix="""test""" )
_lowerCAmelCase = test_output.metrics
_lowerCAmelCase = data_args.n_test
if trainer.is_world_process_zero():
_lowerCAmelCase = round(metrics["""test_loss"""] , 4 )
handle_metrics("""test""" , snake_case_ , training_args.output_dir )
all_metrics.update(snake_case_ )
if training_args.predict_with_generate:
_lowerCAmelCase = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=snake_case_ , clean_up_tokenization_spaces=snake_case_ )
_lowerCAmelCase = lmap(str.strip , snake_case_ )
write_txt_file(snake_case_ , os.path.join(training_args.output_dir , """test_generations.txt""" ) )
if trainer.is_world_process_zero():
save_json(snake_case_ , os.path.join(training_args.output_dir , """all_results.json""" ) )
return all_metrics
def _mp_fn( snake_case_ ):
    """simple docstring"""
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 317
| 1
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class __lowerCamelCase ( unittest.TestCase ):
@slow
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" )
_lowerCAmelCase = tf.convert_to_tensor(
[[5, 121, 11, 660, 16, 730, 25_543, 110, 83, 6]] , dtype=tf.int32 , ) # encodes "J'aime le camembert !" ("I love camembert!")
_lowerCAmelCase = model(lowerCamelCase )["""last_hidden_state"""]
_lowerCAmelCase = tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape , lowerCamelCase )
# compare the actual values for a slice.
_lowerCAmelCase = tf.convert_to_tensor(
[[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.float32 , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 317
|
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE : List[Any] = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
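# A minimal sketch of the idea behind _LazyModule above, assuming only the standard
# library (this is not the transformers implementation): attribute access triggers
# the real submodule import on first use, so importing the package stays cheap.
import importlib
import types

class _LazyDemoModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        # only called when the attribute is not yet set on the module
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value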
| 317
| 1
|