import datasets
_CITATION = "\\n@InProceedings{conneau2018xnli,\n author = \"Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin\",\n title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",\n booktitle = \"Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n location = \"Brussels, Belgium\",\n}\n"

_DESCRIPTION = "\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n"

_KWARGS_DESCRIPTION = "\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n 'accuracy': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric(\"xnli\")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n"


def simple_accuracy(preds, labels):
    """Return the fraction of predictions that exactly match the labels."""
    return (preds == labels).mean()


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Xnli(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        """Register this command as a subcommand of the given parser."""
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        """Execute the command."""
        raise NotImplementedError()
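# A minimal, hypothetical subclass of the interface above, plus the wiring a CLI
# entry point would do. The names here ("EchoCommand", "echo", "--message") are
# illustrative, not taken from the source.
class EchoCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        echo_parser = parser.add_parser("echo", help="Print a message and exit.")
        echo_parser.add_argument("--message", type=str, default="hello")
        echo_parser.set_defaults(func=lambda args: EchoCommand(args.message))

    def __init__(self, message):
        self.message = message

    def run(self):
        print(self.message)


root = ArgumentParser("cli")
EchoCommand.register_subcommand(root.add_subparsers())
args = root.parse_args(["echo", "--message", "hi"])
args.func(args).run()  # prints "hi"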
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = 'openai/whisper-base'
    description = (
        'This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the '
        'transcribed text.'
    )
    name = 'transcriber'
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ['audio']
    outputs = ['text']

    def encode(self, audio):
        """Turn raw audio into Whisper input features."""
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        """Generate token ids from the input features."""
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        """Decode the generated ids back into plain text."""
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
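# A hedged usage sketch of the tool above. It assumes PipelineTool instances are
# callable (encode -> forward -> decode) and lazily load the checkpoint on first
# use; the silent one-second waveform is a stand-in for real 16 kHz audio.
import numpy as np

tool = SpeechToTextTool()
audio = np.zeros(16000, dtype=np.float32)  # 1 s of silence at 16 kHz
print(tool(audio))  # transcribed text (empty-ish for silence)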
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split timm's fused qkv projection into separate query/key/value entries."""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Download an image of two cats for a quick output sanity check."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak a timm ViT checkpoint into the HuggingFace ViT structure.
    """
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_patch16_224",
type=str,
help="Name of the ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
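# Illustrative invocations (the model name follows timm's naming scheme and the
# output folder is arbitrary):
#
#   python convert_vit_timm_to_pytorch.py \
#       --vit_name vit_base_patch16_224 \
#       --pytorch_dump_folder_path ./vit-base-patch16-224
#
# or, equivalently, from Python:
#   convert_vit_checkpoint("vit_base_patch16_224", "./vit-base-patch16-224")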
def decimal_isolate(number, digit_amount):
    """Isolate the decimal part of `number`, rounded to `digit_amount` digits when positive."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
logger = logging.get_logger(__name__)


class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def _preprocess_image(image):
    warnings.warn(
        "The preprocess method is deprecated and will be removed in a future version. Please"
        " use VaeImageProcessor.preprocess instead",
        FutureWarning,
    )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def _preprocess_mask(mask):
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]
    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        # binarize: pixels below 0.5 are repainted, the rest are kept
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask
class RePaintPipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image,
        mask_image,
        num_inference_steps=250,
        eta=0.0,
        jump_length=10,
        jump_n_sample=10,
        generator=None,
        output_type="pil",
        return_dict=True,
    ):
        original_image = image
        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)

        batch_size = original_image.shape[0]
        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )
        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta

        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
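# A hedged usage sketch of the pipeline above (checkpoint and file names are
# examples, not from the source; RePaint repaints the regions selected by the
# mask while keeping the rest of the image fixed):
from diffusers import RePaintPipeline, RePaintScheduler
from PIL import Image

scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)

original = Image.open("face.png").resize((256, 256))
mask = Image.open("mask.png").resize((256, 256))
result = pipe(image=original, mask_image=mask, num_inference_steps=250).images[0]
result.save("inpainted.png")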
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyNightlyTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
from math import isclose, sqrt
def next_point(point_x, point_y, incoming_gradient):
    # normal_gradient = gradient of the normal at (point_x, point_y);
    # s2 = sin(2*theta), c2 = cos(2*theta), used to reflect the incoming gradient
    normal_gradient = point_y / 4 / point_x
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_minus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution(first_x_coord=1.4, first_y_coord=-9.6):
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections
if __name__ == "__main__":
print(f'''{solution() = }''')
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()
        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

SPIECE_UNDERLINE = "▁"
class RemBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    def get_test_pipeline(self, model, tokenizer, processor):
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=["polics", "health"]
        )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test(self, classifier, _):
        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics")
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # No kwarg
        outputs = classifier("Who are you voting for in 2020?", ["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier(
            "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}"
        )
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["I am happy"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(1)
            ],
        )
        outputs = classifier(["I am happy", "I am sad"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(2)
            ],
        )

        with self.assertRaises(ValueError):
            classifier("", candidate_labels="politics")

        with self.assertRaises(TypeError):
            classifier(None, candidate_labels="politics")

        with self.assertRaises(ValueError):
            classifier("Who are you voting for in 2020?", candidate_labels="")

        with self.assertRaises(TypeError):
            classifier("Who are you voting for in 2020?", candidate_labels=None)

        with self.assertRaises(ValueError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template="Not formatting template",
            )

        with self.assertRaises(AttributeError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template=None,
            )

        self.run_entailment_id(classifier)
    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)
@require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="pt",
        )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"Who are you voting for in 2020?" * 100, candidate_labels=["politics", "public health", "science"])
@require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="pt",
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )
@require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="tf",
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )
@slow
@require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            },
        )
        outputs = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data.", candidate_labels=["machine learning", "statistics", "translation", "vision"], multi_label=__a, )
        self.assertEqual(
            nested_simplify(outputs),
            {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.817, 0.713, 0.018, 0.018],
}, )
@slow
@require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            },
        )
        outputs = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data.", candidate_labels=["machine learning", "statistics", "translation", "vision"], multi_label=__a, )
        self.assertEqual(
            nested_simplify(outputs),
            {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.817, 0.713, 0.018, 0.018],
}, )
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
),
"google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
),
"google/electra-base-generator": (
"https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
),
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/electra-small-generator": 512,
"google/electra-base-generator": 512,
"google/electra-large-generator": 512,
"google/electra-small-discriminator": 512,
"google/electra-base-discriminator": 512,
"google/electra-large-discriminator": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"google/electra-small-generator": {"do_lower_case": True},
"google/electra-base-generator": {"do_lower_case": True},
"google/electra-large-generator": {"do_lower_case": True},
"google/electra-small-discriminator": {"do_lower_case": True},
"google/electra-base-discriminator": {"do_lower_case": True},
"google/electra-large-discriminator": {"do_lower_case": True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Keep the backend normalizer in sync with the requested options
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """
    Output class for the ScoreSdeVeScheduler's step function: the previous sample
    and its mean over previous timesteps.
    """

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps=2000,
        snr=0.15,
        sigma_min=0.01,
        sigma_max=1348.0,
        sampling_eps=1e-5,
        correct_steps=1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.timesteps = None
        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample, timestep=None):
        return sample

    def set_timesteps(self, num_inference_steps, sampling_eps=None, device=None):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(self, num_inference_steps, sigma_min=None, sigma_max=None, sampling_eps=None):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)
        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(self, model_output, timestep, sample, generator=None, return_dict=True):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )
        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)
        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(self, model_output, sample, generator=None, return_dict=True):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)
        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(self, original_samples, noise, timesteps):
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
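# A hedged sketch of the predictor-corrector sampling loop this scheduler is
# designed for, mirroring diffusers' ScoreSdeVePipeline. The `unet` below is a
# stand-in score model (an assumption, not part of this file); any module
# mapping (sample, sigma) to an object with a `.sample` tensor would do.
import torch


def sde_ve_sample(unet, scheduler, shape=(1, 3, 64, 64), num_inference_steps=2000):
    sample = torch.randn(*shape) * scheduler.init_noise_sigma
    scheduler.set_timesteps(num_inference_steps)
    scheduler.set_sigmas(num_inference_steps)
    for i, t in enumerate(scheduler.timesteps):
        sigma_t = scheduler.sigmas[i] * torch.ones(shape[0])
        # corrector steps: Langevin-style updates at the current noise level
        for _ in range(scheduler.config.correct_steps):
            model_output = unet(sample, sigma_t).sample
            sample = scheduler.step_correct(model_output, sample).prev_sample
        # predictor step: one discretized reverse-SDE step
        model_output = unet(sample, sigma_t).sample
        sample = scheduler.step_pred(model_output, t, sample).prev_sample
    return (sample / 2 + 0.5).clamp(0, 1)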
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( a):
lowerCamelCase__ = ['input_ids', 'attention_mask']
def __init__( self, __a="</s>", __a="<unk>", __a="<pad>", __a=125, __a=None, **__a, ):
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
_lowerCAmelCase : List[str] = [f"<extra_id_{i}>" for i in range(__a)]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
_lowerCAmelCase : str = len(set(filter(lambda __a: bool("extra_id" in str(__a)), __a)))
if extra_tokens != extra_ids:
raise ValueError(
f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
" provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
" extra_ids tokens")
_lowerCAmelCase : List[Any] = AddedToken(__a, lstrip=__a, rstrip=__a) if isinstance(__a, __a) else pad_token
_lowerCAmelCase : int = AddedToken(__a, lstrip=__a, rstrip=__a) if isinstance(__a, __a) else eos_token
_lowerCAmelCase : str = AddedToken(__a, lstrip=__a, rstrip=__a) if isinstance(__a, __a) else unk_token
super().__init__(
eos_token=__a, unk_token=__a, pad_token=__a, extra_ids=__a, additional_special_tokens=__a, **__a, )
_lowerCAmelCase : int = extra_ids
_lowerCAmelCase : Union[str, Any] = 2**8 # utf is 8 bits
# define special tokens dict
_lowerCAmelCase : Dict[int, str] = {
self.pad_token: 0,
self.eos_token: 1,
self.unk_token: 2,
}
_lowerCAmelCase : List[str] = len(self.special_tokens_encoder)
_lowerCAmelCase : Tuple = len(__a)
for i, token in enumerate(__a):
_lowerCAmelCase : str = self.vocab_size + i - n
        _lowerCAmelCase : Dict[int, str] = {v: k for k, v in self.special_tokens_encoder.items()}
@property
def snake_case__ ( self):
'''simple docstring'''
return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
def snake_case__ ( self, __a, __a = None, __a = False):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a, token_ids_a=__a, already_has_special_tokens=__a)
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(__a)) + [1]
return ([0] * len(__a)) + [1] + ([0] * len(__a)) + [1]
def snake_case__ ( self, __a):
'''simple docstring'''
if len(__a) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
" eos tokens being added.")
return token_ids
else:
return token_ids + [self.eos_token_id]
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
_lowerCAmelCase : Any = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos) * [0]
return len(token_ids_a + eos + token_ids_a + eos) * [0]
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
_lowerCAmelCase : Any = self._add_eos_if_not_present(__a)
if token_ids_a is None:
return token_ids_a
else:
_lowerCAmelCase : Dict = self._add_eos_if_not_present(__a)
return token_ids_a + token_ids_a
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = [chr(__a) for i in text.encode("utf-8")]
return tokens
def snake_case__ ( self, __a):
'''simple docstring'''
if token in self.special_tokens_encoder:
_lowerCAmelCase : Any = self.special_tokens_encoder[token]
elif token in self.added_tokens_encoder:
_lowerCAmelCase : List[str] = self.added_tokens_encoder[token]
elif len(__a) != 1:
_lowerCAmelCase : List[Any] = self.unk_token_id
else:
_lowerCAmelCase : Dict = ord(__a) + self._num_special_tokens
return token_id
def snake_case__ ( self, __a):
'''simple docstring'''
if index in self.special_tokens_decoder:
_lowerCAmelCase : Tuple = self.special_tokens_decoder[index]
else:
_lowerCAmelCase : List[Any] = chr(index - self._num_special_tokens)
return token
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = B""
for token in tokens:
if token in self.special_tokens_decoder:
_lowerCAmelCase : Optional[Any] = self.special_tokens_decoder[token].encode("utf-8")
elif token in self.added_tokens_decoder:
                _lowerCAmelCase : Dict = self.added_tokens_decoder[token].encode("utf-8")
elif token in self.special_tokens_encoder:
_lowerCAmelCase : str = token.encode("utf-8")
elif token in self.added_tokens_encoder:
_lowerCAmelCase : List[str] = token.encode("utf-8")
else:
_lowerCAmelCase : Optional[int] = bytes([ord(__a)])
bstring += tok_string
_lowerCAmelCase : int = bstring.decode("utf-8", errors="ignore")
return string
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
return ()
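
# --- A minimal sketch of the byte-level scheme the tokenizer above implements:
# every UTF-8 byte becomes one token id, offset past the pad/eos/unk specials.
# This illustrates the mapping only; it is not the tokenizer's public API.
NUM_SPECIAL = 3  # pad=0, eos=1, unk=2, as in the special_tokens_encoder above
def byte_encode(text):
    return [b + NUM_SPECIAL for b in text.encode("utf-8")]
def byte_decode(ids):
    return bytes(i - NUM_SPECIAL for i in ids if i >= NUM_SPECIAL).decode("utf-8", errors="ignore")
ids = byte_encode("héllo")
print(ids)
print(byte_decode(ids))  # -> héllo
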
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def A ( _lowerCamelCase = 8 ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = ascii_letters + digits + punctuation
return "".join(secrets.choice(_lowerCamelCase ) for _ in range(_lowerCamelCase ) )
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
i -= len(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = i // 3
_lowerCAmelCase : List[Any] = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
_lowerCAmelCase : str = (
chars_incl
+ random(_lowerCamelCase , quotient + remainder )
+ random(_lowerCamelCase , _lowerCamelCase )
+ random(_lowerCamelCase , _lowerCamelCase )
)
_lowerCAmelCase : str = list(_lowerCamelCase )
shuffle(_lowerCamelCase )
return "".join(_lowerCamelCase )
# random is a generalised function for letters, characters and numbers
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
return "".join(secrets.choice(_lowerCamelCase ) for _ in range(_lowerCamelCase ) )
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
pass # Put your code here...
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
pass # Put your code here...
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
pass # Put your code here...
def A ( _lowerCamelCase , _lowerCamelCase = 8 ):
'''simple docstring'''
if len(_lowerCamelCase ) < min_length:
        # the password must be at least min_length characters long
return False
_lowerCAmelCase : Tuple = any(char in ascii_uppercase for char in password )
_lowerCAmelCase : Tuple = any(char in ascii_lowercase for char in password )
_lowerCAmelCase : Optional[Any] = any(char in digits for char in password )
_lowerCAmelCase : Tuple = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase,
# numbers, and special characters
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = int(input("Please indicate the max length of your password: " ).strip() )
_lowerCAmelCase : Tuple = input(
"Please indicate the characters that must be in your password: " ).strip()
print("Password generated:" , password_generator(_lowerCamelCase ) )
print(
"Alternative Password generated:" , alternative_password_generator(_lowerCamelCase , _lowerCamelCase ) , )
print("[If you are thinking of using this passsword, You better save it.]" )
if __name__ == "__main__":
main()
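
# --- The three "Put your code here..." stubs above presumably specialize the
# generalized `random` helper for letters, digits and punctuation, as hinted by
# the commented-out pseudocode; a minimal sketch under that assumption:
import secrets
from string import ascii_letters, digits, punctuation
def random_letters(ctbl=ascii_letters, i=8):
    return "".join(secrets.choice(ctbl) for _ in range(i))
def random_number(ctbl=digits, i=8):
    return "".join(secrets.choice(ctbl) for _ in range(i))
def random_characters(ctbl=punctuation, i=8):
    return "".join(secrets.choice(ctbl) for _ in range(i))
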
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 't5'
lowerCamelCase__ = ['past_key_values']
lowerCamelCase__ = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__( self, __a=3_2128, __a=512, __a=64, __a=2048, __a=6, __a=None, __a=8, __a=32, __a=128, __a=0.1, __a=1E-6, __a=1.0, __a="relu", __a=True, __a=True, __a=0, __a=1, **__a, ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = vocab_size
_lowerCAmelCase : List[str] = d_model
_lowerCAmelCase : List[str] = d_kv
_lowerCAmelCase : Optional[int] = d_ff
_lowerCAmelCase : Optional[int] = num_layers
_lowerCAmelCase : Union[str, Any] = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
_lowerCAmelCase : List[str] = num_heads
_lowerCAmelCase : Union[str, Any] = relative_attention_num_buckets
_lowerCAmelCase : Optional[int] = relative_attention_max_distance
_lowerCAmelCase : Union[str, Any] = dropout_rate
_lowerCAmelCase : Dict = layer_norm_epsilon
_lowerCAmelCase : Any = initializer_factor
_lowerCAmelCase : Optional[int] = feed_forward_proj
_lowerCAmelCase : Any = use_cache
_lowerCAmelCase : int = self.feed_forward_proj.split("-")
_lowerCAmelCase : Dict = act_info[-1]
_lowerCAmelCase : Optional[Any] = act_info[0] == "gated"
if len(__a) > 1 and act_info[0] != "gated" or len(__a) > 2:
raise ValueError(
f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
"Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
"'gated-gelu' or 'relu'")
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
_lowerCAmelCase : Any = "gelu_new"
super().__init__(
pad_token_id=__a, eos_token_id=__a, is_encoder_decoder=__a, **__a, )
class UpperCAmelCase_ ( a):
@property
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = {
"input_ids": {0: "batch", 1: "encoder_sequence"},
"attention_mask": {0: "batch", 1: "encoder_sequence"},
}
if self.use_past:
_lowerCAmelCase : Any = "past_encoder_sequence + sequence"
_lowerCAmelCase : Optional[int] = {0: "batch"}
_lowerCAmelCase : List[Any] = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
_lowerCAmelCase : Any = {0: "batch", 1: "decoder_sequence"}
_lowerCAmelCase : str = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(__a, direction="inputs")
return common_inputs
@property
def snake_case__ ( self):
'''simple docstring'''
return 13
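
# --- A minimal sketch of how `feed_forward_proj` is parsed by the config above:
# "gated-gelu" splits into a gating flag plus an activation name, and the legacy
# "gated-gelu" value is remapped to "gelu_new". Names here are illustrative.
def parse_feed_forward_proj(value):
    parts = value.split("-")
    act_fn, is_gated = parts[-1], parts[0] == "gated"
    if (len(parts) > 1 and not is_gated) or len(parts) > 2:
        raise ValueError(f"invalid feed_forward_proj: {value}")
    if value == "gated-gelu":
        act_fn = "gelu_new"  # backwards compatibility, as in the config above
    return act_fn, is_gated
print(parse_feed_forward_proj("relu"))        # ('relu', False)
print(parse_feed_forward_proj("gated-gelu"))  # ('gelu_new', True)
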
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_snake_case = {
"configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["ConvNextFeatureExtractor"]
_snake_case = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvNextForImageClassification",
"ConvNextModel",
"ConvNextPreTrainedModel",
"ConvNextBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TFConvNextForImageClassification",
"TFConvNextModel",
"TFConvNextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure)
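
# --- A minimal sketch of the lazy-import pattern behind `_LazyModule`: module
# attribute access resolves the real import on demand via PEP 562 module-level
# __getattr__. Illustrative only; the real _LazyModule does more bookkeeping.
import importlib
import sys
_demo_import_structure = {"json": ["dumps", "loads"]}
def __getattr__(name):
    for module_name, symbols in _demo_import_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(name)
print(sys.modules[__name__].dumps({"lazy": True}))  # json is imported only here
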
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class UpperCAmelCase_ ( nn.Module):
def __init__( self, __a, __a, __a, __a=0.0, __a = None, __a = "geglu", __a = None, __a = False, __a = False, __a = False, __a = False, __a = True, __a = "layer_norm", __a = False, ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : str = only_cross_attention
_lowerCAmelCase : int = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
_lowerCAmelCase : Dict = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.")
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
_lowerCAmelCase : Any = AdaLayerNorm(__a, __a)
elif self.use_ada_layer_norm_zero:
_lowerCAmelCase : Union[str, Any] = AdaLayerNormZero(__a, __a)
else:
_lowerCAmelCase : Any = nn.LayerNorm(__a, elementwise_affine=__a)
_lowerCAmelCase : str = Attention(
query_dim=__a, heads=__a, dim_head=__a, dropout=__a, bias=__a, cross_attention_dim=cross_attention_dim if only_cross_attention else None, upcast_attention=__a, )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
_lowerCAmelCase : Optional[Any] = (
AdaLayerNorm(__a, __a)
if self.use_ada_layer_norm
else nn.LayerNorm(__a, elementwise_affine=__a)
)
_lowerCAmelCase : List[str] = Attention(
query_dim=__a, cross_attention_dim=cross_attention_dim if not double_self_attention else None, heads=__a, dim_head=__a, dropout=__a, bias=__a, upcast_attention=__a, ) # is self-attn if encoder_hidden_states is none
else:
_lowerCAmelCase : Any = None
_lowerCAmelCase : int = None
# 3. Feed-forward
_lowerCAmelCase : List[str] = nn.LayerNorm(__a, elementwise_affine=__a)
_lowerCAmelCase : List[Any] = FeedForward(__a, dropout=__a, activation_fn=__a, final_dropout=__a)
# let chunk size default to None
_lowerCAmelCase : Union[str, Any] = None
_lowerCAmelCase : Union[str, Any] = 0
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Any = chunk_size
_lowerCAmelCase : Tuple = dim
def snake_case__ ( self, __a, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, ):
'''simple docstring'''
if self.use_ada_layer_norm:
_lowerCAmelCase : Union[str, Any] = self.norma(__a, __a)
elif self.use_ada_layer_norm_zero:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Dict = self.norma(
__a, __a, __a, hidden_dtype=hidden_states.dtype)
else:
_lowerCAmelCase : Tuple = self.norma(__a)
_lowerCAmelCase : List[Any] = cross_attention_kwargs if cross_attention_kwargs is not None else {}
_lowerCAmelCase : int = self.attna(
__a, encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None, attention_mask=__a, **__a, )
if self.use_ada_layer_norm_zero:
_lowerCAmelCase : List[Any] = gate_msa.unsqueeze(1) * attn_output
_lowerCAmelCase : Optional[int] = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
_lowerCAmelCase : Union[str, Any] = (
self.norma(__a, __a) if self.use_ada_layer_norm else self.norma(__a)
)
_lowerCAmelCase : str = self.attna(
__a, encoder_hidden_states=__a, attention_mask=__a, **__a, )
_lowerCAmelCase : Optional[Any] = attn_output + hidden_states
# 3. Feed-forward
_lowerCAmelCase : List[str] = self.norma(__a)
if self.use_ada_layer_norm_zero:
_lowerCAmelCase : Dict = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.")
_lowerCAmelCase : Dict = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
_lowerCAmelCase : Tuple = torch.cat(
[self.ff(__a) for hid_slice in norm_hidden_states.chunk(__a, dim=self._chunk_dim)], dim=self._chunk_dim, )
else:
_lowerCAmelCase : Optional[Any] = self.ff(__a)
if self.use_ada_layer_norm_zero:
_lowerCAmelCase : List[str] = gate_mlp.unsqueeze(1) * ff_output
_lowerCAmelCase : List[Any] = ff_output + hidden_states
return hidden_states
class UpperCAmelCase_ ( nn.Module):
def __init__( self, __a, __a = None, __a = 4, __a = 0.0, __a = "geglu", __a = False, ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Optional[int] = int(dim * mult)
_lowerCAmelCase : List[Any] = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
_lowerCAmelCase : Optional[Any] = GELU(__a, __a)
if activation_fn == "gelu-approximate":
_lowerCAmelCase : int = GELU(__a, __a, approximate="tanh")
elif activation_fn == "geglu":
_lowerCAmelCase : Any = GEGLU(__a, __a)
elif activation_fn == "geglu-approximate":
_lowerCAmelCase : List[Any] = ApproximateGELU(__a, __a)
_lowerCAmelCase : Any = nn.ModuleList([])
# project in
self.net.append(__a)
# project dropout
self.net.append(nn.Dropout(__a))
# project out
self.net.append(nn.Linear(__a, __a))
        # FF as used in Vision Transformer, MLP-Mixer, etc., has a final dropout
if final_dropout:
self.net.append(nn.Dropout(__a))
def snake_case__ ( self, __a):
'''simple docstring'''
for module in self.net:
_lowerCAmelCase : Optional[Any] = module(__a)
return hidden_states
class UpperCAmelCase_ ( nn.Module):
def __init__( self, __a, __a, __a = "none"):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Optional[int] = nn.Linear(__a, __a)
_lowerCAmelCase : Any = approximate
def snake_case__ ( self, __a):
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(__a, approximate=self.approximate)
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa), approximate=self.approximate).to(dtype=gate.dtype)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Any = self.proj(__a)
_lowerCAmelCase : Tuple = self.gelu(__a)
return hidden_states
class UpperCAmelCase_ ( nn.Module):
def __init__( self, __a, __a):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Tuple = nn.Linear(__a, dim_out * 2)
def snake_case__ ( self, __a):
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(__a)
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa)).to(dtype=gate.dtype)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = self.proj(__a).chunk(2, dim=-1)
return hidden_states * self.gelu(__a)
class UpperCAmelCase_ ( nn.Module):
def __init__( self, __a, __a):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : int = nn.Linear(__a, __a)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.proj(__a)
return x * torch.sigmoid(1.702 * x)
class UpperCAmelCase_ ( nn.Module):
def __init__( self, __a, __a):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Optional[int] = nn.Embedding(__a, __a)
_lowerCAmelCase : Tuple = nn.SiLU()
_lowerCAmelCase : int = nn.Linear(__a, embedding_dim * 2)
_lowerCAmelCase : Optional[int] = nn.LayerNorm(__a, elementwise_affine=__a)
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.linear(self.silu(self.emb(__a)))
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = torch.chunk(__a, 2)
_lowerCAmelCase : Optional[Any] = self.norm(__a) * (1 + scale) + shift
return x
class UpperCAmelCase_ ( nn.Module):
def __init__( self, __a, __a):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : List[str] = CombinedTimestepLabelEmbeddings(__a, __a)
_lowerCAmelCase : List[str] = nn.SiLU()
_lowerCAmelCase : List[str] = nn.Linear(__a, 6 * embedding_dim, bias=__a)
_lowerCAmelCase : Optional[Any] = nn.LayerNorm(__a, elementwise_affine=__a, eps=1E-6)
def snake_case__ ( self, __a, __a, __a, __a=None):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.linear(self.silu(self.emb(__a, __a, hidden_dtype=__a)))
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = emb.chunk(6, dim=1)
_lowerCAmelCase : str = self.norm(__a) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class UpperCAmelCase_ ( nn.Module):
def __init__( self, __a, __a, __a, __a = None, __a = 1E-5):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : List[Any] = num_groups
_lowerCAmelCase : List[str] = eps
if act_fn is None:
_lowerCAmelCase : Any = None
else:
_lowerCAmelCase : List[str] = get_activation(__a)
_lowerCAmelCase : List[str] = nn.Linear(__a, out_dim * 2)
def snake_case__ ( self, __a, __a):
'''simple docstring'''
if self.act:
_lowerCAmelCase : Any = self.act(__a)
_lowerCAmelCase : Tuple = self.linear(__a)
_lowerCAmelCase : Optional[int] = emb[:, :, None, None]
_lowerCAmelCase , _lowerCAmelCase : Any = emb.chunk(2, dim=1)
_lowerCAmelCase : Tuple = F.group_norm(__a, self.num_groups, eps=self.eps)
_lowerCAmelCase : Optional[Any] = x * (1 + scale) + shift
return x
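
# --- A minimal sketch of the feed-forward chunking used in the transformer
# block above: running the FF over sequence chunks and concatenating matches a
# single full pass, while capping peak activation memory. Sizes are illustrative.
import torch
from torch import nn
ff = nn.Sequential(nn.Linear(16, 64), nn.GELU(), nn.Linear(64, 16))
x = torch.randn(2, 8, 16)  # (batch, seq, dim)
chunk_size, chunk_dim = 4, 1
num_chunks = x.shape[chunk_dim] // chunk_size
chunked = torch.cat([ff(c) for c in x.chunk(num_chunks, dim=chunk_dim)], dim=chunk_dim)
assert torch.allclose(chunked, ff(x), atol=1e-6)
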
from __future__ import annotations
from math import pi
# Define the reduced Planck constant ℏ (h-bar) and the speed of light c
# used in the Casimir formula below
_snake_case = 1.054_571_817e-34 # unit of ℏ : J * s
_snake_case = 3e8 # unit of c : m * s^-1
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if (force, area, distance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if force < 0:
raise ValueError("Magnitude of force can not be negative" )
if distance < 0:
raise ValueError("Distance can not be negative" )
if area < 0:
raise ValueError("Area can not be negative" )
if force == 0:
_lowerCAmelCase : Optional[int] = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
240 * (distance) ** 4
)
return {"force": force}
elif area == 0:
_lowerCAmelCase : List[str] = (240 * force * (distance) ** 4) / (
REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
)
return {"area": area}
elif distance == 0:
_lowerCAmelCase : Dict = (
(REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
) ** (1 / 4)
return {"distance": distance}
raise ValueError("One and only one argument must be 0" )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
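
# --- A minimal worked example of the Casimir formula the function above
# inverts: F = (ℏ * c * π² * A) / (240 * d⁴), evaluated here for an
# illustrative 1 cm² plate pair separated by 1 µm.
from math import pi
h_bar = 1.054_571_817e-34  # J * s
c = 3e8                    # m / s
area = 1e-4                # m², i.e. 1 cm²
distance = 1e-6            # m, i.e. 1 µm
force = (h_bar * c * pi**2 * area) / (240 * distance**4)
print(f"{force:.3e} N")  # on the order of 1e-7 N
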
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def A ( _lowerCamelCase , _lowerCamelCase=False ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = []
# fmt: off
# stem:
rename_keys.append(("cls_token", "vit.embeddings.cls_token") )
rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") )
rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") )
# backbone
rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_lowerCAmelCase : Optional[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
# fmt: on
return rename_keys
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
_lowerCAmelCase : Tuple = ""
else:
_lowerCAmelCase : Optional[Any] = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowerCAmelCase : int = state_dict.pop(F"blocks.{i}.attn.qkv.weight" )
_lowerCAmelCase : Dict = state_dict.pop(F"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
_lowerCAmelCase : Optional[int] = in_proj_weight[
: config.hidden_size, :
]
_lowerCAmelCase : List[Any] = in_proj_bias[: config.hidden_size]
_lowerCAmelCase : List[str] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowerCAmelCase : List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowerCAmelCase : Optional[int] = in_proj_weight[
-config.hidden_size :, :
]
_lowerCAmelCase : int = in_proj_bias[-config.hidden_size :]
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = dct.pop(_lowerCamelCase )
_lowerCAmelCase : Dict = val
def A ( ):
'''simple docstring'''
_lowerCAmelCase : int = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCAmelCase : int = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return im
@torch.no_grad()
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ):
'''simple docstring'''
_lowerCAmelCase : List[str] = BitConfig(
global_padding="same" , layer_type="bottleneck" , depths=(3, 4, 9) , out_features=["stage3"] , embedding_dynamic_padding=_lowerCamelCase , )
_lowerCAmelCase : List[Any] = ViTHybridConfig(backbone_config=_lowerCamelCase , image_size=384 , num_labels=1_000 )
_lowerCAmelCase : str = False
# load original model from timm
_lowerCAmelCase : Optional[Any] = timm.create_model(_lowerCamelCase , pretrained=_lowerCamelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
_lowerCAmelCase : str = timm_model.state_dict()
if base_model:
remove_classification_head_(_lowerCamelCase )
_lowerCAmelCase : List[Any] = create_rename_keys(_lowerCamelCase , _lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
read_in_q_k_v(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : List[Any] = "huggingface/label-files"
_lowerCAmelCase : Optional[int] = "imagenet-1k-id2label.json"
_lowerCAmelCase : Any = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) )
_lowerCAmelCase : Optional[int] = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
_lowerCAmelCase : Tuple = idalabel
_lowerCAmelCase : Optional[Any] = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
_lowerCAmelCase : List[Any] = ViTHybridModel(_lowerCamelCase ).eval()
else:
_lowerCAmelCase : Any = ViTHybridForImageClassification(_lowerCamelCase ).eval()
model.load_state_dict(_lowerCamelCase )
# create image processor
_lowerCAmelCase : Optional[int] = create_transform(**resolve_data_config({} , model=_lowerCamelCase ) )
_lowerCAmelCase : Any = transform.transforms
_lowerCAmelCase : str = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
_lowerCAmelCase : List[Any] = ViTHybridImageProcessor(
do_resize=_lowerCamelCase , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=_lowerCamelCase , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=_lowerCamelCase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
_lowerCAmelCase : Optional[int] = prepare_img()
_lowerCAmelCase : str = transform(_lowerCamelCase ).unsqueeze(0 )
_lowerCAmelCase : Optional[Any] = processor(_lowerCamelCase , return_tensors="pt" ).pixel_values
# verify pixel values
assert torch.allclose(_lowerCamelCase , _lowerCamelCase )
# verify logits
with torch.no_grad():
_lowerCAmelCase : Any = model(_lowerCamelCase )
_lowerCAmelCase : Tuple = outputs.logits
print("Predicted class:" , logits.argmax(-1 ).item() )
if base_model:
_lowerCAmelCase : str = timm_model.forward_features(_lowerCamelCase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(_lowerCamelCase , outputs.pooler_output , atol=1e-3 )
else:
_lowerCAmelCase : List[Any] = timm_model(_lowerCamelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_lowerCamelCase , outputs.logits , atol=1e-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(F"Saving model {vit_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCamelCase )
print(F"Saving processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(_lowerCamelCase )
if push_to_hub:
print(F"Pushing model and processor to the hub {vit_name}" )
model.push_to_hub(F"ybelkada/{vit_name}" )
processor.push_to_hub(F"ybelkada/{vit_name}" )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_r50_s16_384",
type=str,
help="Name of the hybrid ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
_snake_case = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
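
# --- A minimal sketch of the q/k/v split performed by the weight-splitting
# helper above: timm stores the attention projections as one fused
# (3*hidden, hidden) matrix that is sliced into three equal blocks.
# The size here is illustrative.
import torch
hidden = 8
qkv_weight = torch.randn(3 * hidden, hidden)
qkv_bias = torch.randn(3 * hidden)
q_w, k_w, v_w = qkv_weight[:hidden], qkv_weight[hidden : 2 * hidden], qkv_weight[-hidden:]
q_b, k_b, v_b = qkv_bias[:hidden], qkv_bias[hidden : 2 * hidden], qkv_bias[-hidden:]
assert q_w.shape == k_w.shape == v_w.shape == (hidden, hidden)
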
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
_snake_case = True
from torch.cuda.amp import autocast
_snake_case = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase_ :
lowerCamelCase__ = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
lowerCamelCase__ = field(
default=a , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'Whether to log verbose messages or not.'} , )
lowerCamelCase__ = field(
default=2.0 , metadata={'help': 'Maximum temperature for gumbel softmax.'})
lowerCamelCase__ = field(
default=0.5 , metadata={'help': 'Minimum temperature for gumbel softmax.'})
lowerCamelCase__ = field(
default=0.9_9_9_9_9_5 , metadata={'help': 'Decay of gumbel temperature during training.'})
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
_lowerCAmelCase : Optional[Any] = logging.WARNING
if model_args.verbose_logging:
_lowerCAmelCase : Dict = logging.DEBUG
elif trainer_utils.is_main_process(training_args.local_rank ):
_lowerCAmelCase : str = logging.INFO
logger.setLevel(_lowerCamelCase )
@dataclass
class UpperCAmelCase_ :
lowerCamelCase__ = field(
default=a , metadata={'help': 'The name of the dataset to use (via the datasets library).'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
lowerCamelCase__ = field(
default='train' , metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} , )
lowerCamelCase__ = field(
default='validation' , metadata={
'help': (
'The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''
)
} , )
lowerCamelCase__ = field(
default='file' , metadata={'help': 'Column in the dataset that contains speech file path. Defaults to \'file\''} , )
lowerCamelCase__ = field(
default=a , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'})
lowerCamelCase__ = field(
default=1 , metadata={
'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
} , )
lowerCamelCase__ = field(
default=a , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
lowerCamelCase__ = field(
default=2_0.0 , metadata={'help': 'Filter audio files that are longer than `max_duration_in_seconds` seconds'})
@dataclass
class UpperCAmelCase_ :
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = "longest"
lowerCamelCase__ = None
lowerCamelCase__ = None
def __call__( self, __a):
'''simple docstring'''
_lowerCAmelCase : Any = self.feature_extractor.pad(
__a, max_length=self.max_length, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", )
_lowerCAmelCase : Tuple = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
_lowerCAmelCase : Optional[Any] = batch["input_values"].shape[0]
# make sure that no loss is computed on padded inputs
if batch["attention_mask"] is not None:
# compute real output lengths according to convolution formula
_lowerCAmelCase : List[str] = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
torch.long)
_lowerCAmelCase : Dict = torch.zeros(
(batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device)
            # these two operations make sure that all values
# before the output lengths indices are attended to
_lowerCAmelCase : List[str] = 1
_lowerCAmelCase : Union[str, Any] = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
# sample randomly masked indices
_lowerCAmelCase : Optional[Any] = _compute_mask_indices(
(batch_size, mask_indices_seq_length), self.model.config.mask_time_prob, self.model.config.mask_time_length, attention_mask=__a, min_masks=2, )
return batch
class UpperCAmelCase_ ( a):
def __init__( self, *__a, __a=1, __a=0, __a=1.0, **__a):
'''simple docstring'''
super().__init__(*__a, **__a)
_lowerCAmelCase : Dict = 0
_lowerCAmelCase : List[str] = max_gumbel_temp
_lowerCAmelCase : List[Any] = min_gumbel_temp
_lowerCAmelCase : int = gumbel_temp_decay
def snake_case__ ( self, __a, __a):
'''simple docstring'''
model.train()
_lowerCAmelCase : str = self._prepare_inputs(__a)
if self.use_amp:
with autocast():
_lowerCAmelCase : Any = self.compute_loss(__a, __a)
else:
_lowerCAmelCase : Dict = self.compute_loss(__a, __a)
if self.args.n_gpu > 1 or self.deepspeed:
if model.module.config.ctc_loss_reduction == "mean":
_lowerCAmelCase : List[str] = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
_lowerCAmelCase : Union[str, Any] = loss.sum() / (inputs["mask_time_indices"]).sum()
else:
raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")
if self.args.gradient_accumulation_steps > 1:
_lowerCAmelCase : List[str] = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(__a).backward()
elif self.use_apex:
with amp.scale_loss(__a, self.optimizer) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(__a)
else:
loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))
return loss.detach()
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = parser.parse_args_into_dataclasses()
configure_logger(_lowerCamelCase , _lowerCamelCase )
# Downloading and loading a dataset from the hub.
_lowerCAmelCase : List[Any] = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
_lowerCAmelCase : int = DatasetDict()
_lowerCAmelCase : Optional[int] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]" , cache_dir=model_args.cache_dir , )
_lowerCAmelCase : List[str] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]" , cache_dir=model_args.cache_dir , )
else:
        # make sure only "validation" and "train" keys remain
_lowerCAmelCase : List[str] = DatasetDict()
_lowerCAmelCase : List[Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split="validation" , cache_dir=model_args.cache_dir , )
_lowerCAmelCase : Union[str, Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"{data_args.train_split_name}" , cache_dir=model_args.cache_dir , )
# only normalized-inputs-training is supported
_lowerCAmelCase : List[Any] = WavaVecaFeatureExtractor.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=_lowerCamelCase )
def prepare_dataset(_lowerCamelCase ):
# check that all files have the correct sampling rate
_lowerCAmelCase , _lowerCAmelCase : Any = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
return batch
# load audio files into numpy arrays
_lowerCAmelCase : Dict = datasets.map(
_lowerCamelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets["train"].column_names )
# filter audio files that are too long
_lowerCAmelCase : Tuple = vectorized_datasets.filter(
lambda _lowerCamelCase : len(data["speech"] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
def normalize(_lowerCamelCase ):
return feature_extractor(batch["speech"] , sampling_rate=feature_extractor.sampling_rate )
# normalize and transform to `BatchFeatures`
_lowerCAmelCase : Dict = vectorized_datasets.map(
_lowerCamelCase , batched=_lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets["train"].column_names , )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
_lowerCAmelCase : Tuple = WavaVecaConfig.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
raise ValueError(
"PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
" ``config.feat_extract_norm='layer'" )
_lowerCAmelCase : Union[str, Any] = WavaVecaForPreTraining(_lowerCamelCase )
_lowerCAmelCase : int = DataCollatorForWavaVecaPretraining(model=_lowerCamelCase , feature_extractor=_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = WavaVecaPreTrainer(
model=_lowerCamelCase , data_collator=_lowerCamelCase , args=_lowerCamelCase , train_dataset=vectorized_datasets["train"] , eval_dataset=vectorized_datasets["validation"] , tokenizer=_lowerCamelCase , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
trainer.train()
if __name__ == "__main__":
main()
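
# --- A minimal sketch of the gumbel-temperature schedule applied by the
# trainer above: exponential decay per update step, clipped at a minimum
# temperature. Default values mirror the ModelArguments defaults.
def gumbel_temperature(step, max_temp=2.0, min_temp=0.5, decay=0.999995):
    return max(max_temp * decay**step, min_temp)
for step in (0, 100_000, 500_000):
    print(step, round(gumbel_temperature(step), 4))
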
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase_ ( a , unittest.TestCase):
lowerCamelCase__ = SpeechTaTokenizer
lowerCamelCase__ = False
lowerCamelCase__ = True
def snake_case__ ( self):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCAmelCase : List[str] = SpeechTaTokenizer(__a)
_lowerCAmelCase : List[Any] = AddedToken("<mask>", lstrip=__a, rstrip=__a)
_lowerCAmelCase : List[Any] = mask_token
tokenizer.add_special_tokens({"mask_token": mask_token})
tokenizer.add_tokens(["<ctc_blank>"])
tokenizer.save_pretrained(self.tmpdirname)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = "this is a test"
_lowerCAmelCase : Union[str, Any] = "this is a test"
return input_text, output_text
def snake_case__ ( self, __a, __a=False, __a=20, __a=5):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : str = self.get_input_output_texts(__a)
_lowerCAmelCase : Optional[Any] = tokenizer.encode(__a, add_special_tokens=__a)
_lowerCAmelCase : Dict = tokenizer.decode(__a, clean_up_tokenization_spaces=__a)
return text, ids
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = "<pad>"
_lowerCAmelCase : Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a), __a)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a), __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0], "<s>")
self.assertEqual(vocab_keys[1], "<pad>")
self.assertEqual(vocab_keys[-4], "œ")
self.assertEqual(vocab_keys[-2], "<mask>")
self.assertEqual(vocab_keys[-1], "<ctc_blank>")
self.assertEqual(len(__a), 81)
def snake_case__ ( self):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size, 79)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.get_tokenizers(do_lower_case=__a)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
_lowerCAmelCase : str = tokenizer.vocab_size
_lowerCAmelCase : Union[str, Any] = len(__a)
self.assertNotEqual(__a, 0)
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
_lowerCAmelCase : Tuple = ["aaaaa bbbbbb", "cccccccccdddddddd"]
_lowerCAmelCase : Optional[int] = tokenizer.add_tokens(__a)
_lowerCAmelCase : Union[str, Any] = tokenizer.vocab_size
_lowerCAmelCase : Union[str, Any] = len(__a)
self.assertNotEqual(__a, 0)
self.assertEqual(__a, __a)
self.assertEqual(__a, len(__a))
self.assertEqual(__a, all_size + len(__a))
_lowerCAmelCase : str = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=__a)
self.assertGreaterEqual(len(__a), 4)
self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
_lowerCAmelCase : Optional[Any] = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
_lowerCAmelCase : List[str] = tokenizer.add_special_tokens(__a)
_lowerCAmelCase : List[Any] = tokenizer.vocab_size
_lowerCAmelCase : str = len(__a)
self.assertNotEqual(__a, 0)
self.assertEqual(__a, __a)
self.assertEqual(__a, len(__a))
self.assertEqual(__a, all_size_a + len(__a))
_lowerCAmelCase : List[Any] = tokenizer.encode(
">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=__a)
self.assertGreaterEqual(len(__a), 6)
self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
self.assertGreater(tokens[0], tokens[1])
self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
self.assertGreater(tokens[-3], tokens[-4])
self.assertEqual(tokens[0], tokenizer.eos_token_id)
self.assertEqual(tokens[-3], tokenizer.pad_token_id)
def snake_case__ ( self):
'''simple docstring'''
pass
def snake_case__ ( self):
'''simple docstring'''
pass
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.get_tokenizer()
_lowerCAmelCase : Tuple = tokenizer.tokenize("This is a test")
# fmt: off
self.assertListEqual(__a, [SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"])
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a), [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6], )
_lowerCAmelCase : int = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
__a, [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."])
_lowerCAmelCase : Optional[int] = tokenizer.convert_tokens_to_ids(__a)
# fmt: off
self.assertListEqual(__a, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
# fmt: on
_lowerCAmelCase : Optional[int] = tokenizer.convert_ids_to_tokens(__a)
self.assertListEqual(
__a, [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."])
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = [
"Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
"general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
"Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
"models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
"BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
"conditioning on both left and right context in all layers.",
"The quick brown fox jumps over the lazy dog.",
]
# fmt: off
_lowerCAmelCase : Union[str, Any] = {
"input_ids": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a, model_name="microsoft/speecht5_asr", revision="c5ef64c71905caeccde0e4462ef3f9077224c524", sequences=__a, )
| 300
|
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def A ( _lowerCamelCase = "laptop" ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = F"https://www.amazon.in/laptop/s?k={product}"
_lowerCAmelCase : Dict = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
"Accept-Language": "en-US, en;q=0.5",
}
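    # Fetch the search-results page with a browser-like User-Agent and parse it;
    # each product card on the page is a div with class "s-result-item".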
    _lowerCAmelCase : Optional[int] = BeautifulSoup(requests.get(_lowerCamelCase , headers=_lowerCamelCase ).text , "html.parser" )
# Initialize a Pandas dataframe with the column titles
_lowerCAmelCase : int = DataFrame(
columns=[
"Product Title",
"Product Link",
"Current Price of the product",
"Product Rating",
"MRP of the product",
"Discount",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ):
try:
_lowerCAmelCase : Any = item.ha.text
_lowerCAmelCase : List[str] = "https://www.amazon.in/" + item.ha.a["href"]
_lowerCAmelCase : Any = item.find("span" , attrs={"class": "a-offscreen"} ).text
try:
_lowerCAmelCase : List[str] = item.find("span" , attrs={"class": "a-icon-alt"} ).text
except AttributeError:
_lowerCAmelCase : str = "Not available"
try:
_lowerCAmelCase : Optional[Any] = (
"₹"
+ item.find(
"span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
)
except AttributeError:
_lowerCAmelCase : Optional[Any] = ""
try:
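                # Discount (%) = (MRP - current price) / MRP * 100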
_lowerCAmelCase : int = float(
(
(
float(product_mrp.strip("₹" ).replace("," , "" ) )
- float(product_price.strip("₹" ).replace("," , "" ) )
)
/ float(product_mrp.strip("₹" ).replace("," , "" ) )
)
* 100 )
except ValueError:
_lowerCAmelCase : Optional[Any] = float("nan" )
except AttributeError:
pass
_lowerCAmelCase : Any = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
_lowerCAmelCase : List[str] = " "
_lowerCAmelCase : Tuple = " "
data_frame.index += 1
return data_frame
if __name__ == "__main__":
_snake_case = "headphones"
get_amazon_product_data(product).to_csv(f'''Amazon Product Data for {product}.csv''')
| 300
| 1
|
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
_snake_case = logging.get_logger(__name__)
@add_end_docstrings(a)
class UpperCAmelCase_ ( a):
def __init__( self, **__a):
'''simple docstring'''
super().__init__(**__a)
if self.framework == "tf":
raise ValueError(f"The {self.__class__} is only available in PyTorch.")
requires_backends(self, "vision")
self.check_model_type(__a)
def __call__( self, __a, __a = None, **__a, ):
'''simple docstring'''
if "text_queries" in kwargs:
_lowerCAmelCase : str = kwargs.pop("text_queries")
if isinstance(__a, (str, Image.Image)):
_lowerCAmelCase : int = {"image": image, "candidate_labels": candidate_labels}
else:
_lowerCAmelCase : Optional[Any] = image
_lowerCAmelCase : Tuple = super().__call__(__a, **__a)
return results
def snake_case__ ( self, **__a):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = {}
if "threshold" in kwargs:
_lowerCAmelCase : Dict = kwargs["threshold"]
if "top_k" in kwargs:
_lowerCAmelCase : str = kwargs["top_k"]
return {}, {}, postprocess_params
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Tuple = load_image(inputs["image"])
_lowerCAmelCase : Optional[Any] = inputs["candidate_labels"]
if isinstance(__a, __a):
_lowerCAmelCase : Optional[Any] = candidate_labels.split(",")
_lowerCAmelCase : Optional[int] = torch.tensor([[image.height, image.width]], dtype=torch.intaa)
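        # One forward pass per candidate label: each yielded dict pairs the
        # tokenized label query with the image features for that pass.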
for i, candidate_label in enumerate(__a):
_lowerCAmelCase : Any = self.tokenizer(__a, return_tensors=self.framework)
_lowerCAmelCase : Optional[Any] = self.image_processor(__a, return_tensors=self.framework)
yield {
"is_last": i == len(__a) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = model_inputs.pop("target_size")
_lowerCAmelCase : str = model_inputs.pop("candidate_label")
_lowerCAmelCase : str = model_inputs.pop("is_last")
_lowerCAmelCase : Optional[int] = self.model(**__a)
_lowerCAmelCase : Dict = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
return model_outputs
def snake_case__ ( self, __a, __a=0.1, __a=None):
'''simple docstring'''
_lowerCAmelCase : Dict = []
for model_output in model_outputs:
_lowerCAmelCase : Any = model_output["candidate_label"]
_lowerCAmelCase : Union[str, Any] = BaseModelOutput(__a)
_lowerCAmelCase : str = self.image_processor.post_process_object_detection(
outputs=__a, threshold=__a, target_sizes=model_output["target_size"])[0]
for index in outputs["scores"].nonzero():
_lowerCAmelCase : int = outputs["scores"][index].item()
_lowerCAmelCase : Optional[Any] = self._get_bounding_box(outputs["boxes"][index][0])
_lowerCAmelCase : Optional[int] = {"score": score, "label": label, "box": box}
results.append(__a)
        _lowerCAmelCase : Optional[Any] = sorted(__a, key=lambda __a: __a["score"], reverse=__a)
if top_k:
_lowerCAmelCase : Union[str, Any] = results[:top_k]
return results
def snake_case__ ( self, __a):
'''simple docstring'''
if self.framework != "pt":
raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Any = box.int().tolist()
_lowerCAmelCase : int = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox
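# Usage sketch (illustrative only; the checkpoint name below is an assumption,
# not taken from this file):
#   detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#   detector("image.png", candidate_labels=["cat", "remote control"])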
| 300
|
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
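    # PyTorch stores these Linear weights as (out_features, in_features), while the
    # TF checkpoint expects (in_features, out_features), hence the transpose below.
    # The (pattern, replacement) pairs that follow translate PyTorch parameter
    # names into TF1-style BERT checkpoint variable names, applied in order.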
_lowerCAmelCase : Tuple = (
("layer.", "layer_"),
("word_embeddings.weight", "word_embeddings"),
("position_embeddings.weight", "position_embeddings"),
("token_type_embeddings.weight", "token_type_embeddings"),
(".", "/"),
("LayerNorm/weight", "LayerNorm/gamma"),
("LayerNorm/bias", "LayerNorm/beta"),
("weight", "kernel"),
)
if not os.path.isdir(_lowerCamelCase ):
os.makedirs(_lowerCamelCase )
_lowerCAmelCase : Any = model.state_dict()
def to_tf_var_name(_lowerCamelCase ):
for patt, repl in iter(_lowerCamelCase ):
_lowerCAmelCase : str = name.replace(_lowerCamelCase , _lowerCamelCase )
return F"bert/{name}"
def create_tf_var(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : Optional[Any] = tf.dtypes.as_dtype(tensor.dtype )
_lowerCAmelCase : Optional[int] = tf.get_variable(dtype=_lowerCamelCase , shape=tensor.shape , name=_lowerCamelCase , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(_lowerCamelCase )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
_lowerCAmelCase : Optional[Any] = to_tf_var_name(_lowerCamelCase )
_lowerCAmelCase : Any = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
_lowerCAmelCase : Tuple = torch_tensor.T
_lowerCAmelCase : str = create_tf_var(tensor=_lowerCamelCase , name=_lowerCamelCase , session=_lowerCamelCase )
tf.keras.backend.set_value(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Optional[int] = session.run(_lowerCamelCase )
print(F"Successfully created {tf_name}: {np.allclose(_lowerCamelCase , _lowerCamelCase )}" )
_lowerCAmelCase : List[Any] = tf.train.Saver(tf.trainable_variables() )
saver.save(_lowerCamelCase , os.path.join(_lowerCamelCase , model_name.replace("-" , "_" ) + ".ckpt" ) )
def A ( _lowerCamelCase=None ):
'''simple docstring'''
_lowerCAmelCase : int = argparse.ArgumentParser()
parser.add_argument("--model_name" , type=_lowerCamelCase , required=_lowerCamelCase , help="model name e.g. bert-base-uncased" )
parser.add_argument(
"--cache_dir" , type=_lowerCamelCase , default=_lowerCamelCase , required=_lowerCamelCase , help="Directory containing pytorch model" )
parser.add_argument("--pytorch_model_path" , type=_lowerCamelCase , required=_lowerCamelCase , help="/path/to/<pytorch-model-name>.bin" )
parser.add_argument("--tf_cache_dir" , type=_lowerCamelCase , required=_lowerCamelCase , help="Directory in which to save tensorflow model" )
_lowerCAmelCase : Optional[Any] = parser.parse_args(_lowerCamelCase )
_lowerCAmelCase : List[Any] = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=_lowerCamelCase , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
| 300
| 1
|
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
_snake_case = logging.getLogger(__name__)
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , ):
'''simple docstring'''
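    # Quantize a model to 8-bit or 4-bit with bitsandbytes: replace its nn.Linear
    # layers, optionally load checkpoint weights, and dispatch across devices.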
_lowerCAmelCase : Optional[int] = bnb_quantization_config.load_in_abit
_lowerCAmelCase : int = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
" make sure you have the latest version of `bitsandbytes` installed." )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
"make sure you have the latest version of `bitsandbytes` installed." )
_lowerCAmelCase : Tuple = []
# custom device map
if isinstance(_lowerCamelCase , _lowerCamelCase ) and len(device_map.keys() ) > 1:
_lowerCAmelCase : int = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
_lowerCAmelCase : Optional[Any] = get_keys_to_not_convert(_lowerCamelCase )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(_lowerCamelCase )
_lowerCAmelCase : str = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
_lowerCAmelCase : Dict = []
_lowerCAmelCase : Optional[int] = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(_lowerCamelCase )
# compatibility with peft
_lowerCAmelCase : List[Any] = load_in_abit
_lowerCAmelCase : Any = load_in_abit
_lowerCAmelCase : Any = get_parameter_device(_lowerCamelCase )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"It is not recommended to quantize a loaded model. "
"The model should be instantiated under the `init_empty_weights` context manager." )
_lowerCAmelCase : Optional[int] = replace_with_bnb_layers(_lowerCamelCase , _lowerCamelCase , modules_to_not_convert=_lowerCamelCase )
# convert param to the right dtype
_lowerCAmelCase : List[str] = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
_lowerCAmelCase : Tuple = name.replace(".weight" , "" ).replace(".bias" , "" )
_lowerCAmelCase : Optional[Any] = getattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(_lowerCamelCase ):
param.to(_lowerCamelCase )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("No GPU found. A GPU is needed for quantization." )
logger.info(
F"The model device type is {model_device.type}. However, cuda is needed for quantization."
"We move the model to cuda." )
return model
elif weights_location is None:
raise RuntimeError(
F"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} " )
else:
with init_empty_weights():
_lowerCAmelCase : Any = replace_with_bnb_layers(
_lowerCamelCase , _lowerCamelCase , modules_to_not_convert=_lowerCamelCase )
_lowerCAmelCase : List[str] = get_quantized_model_device_map(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , max_memory=_lowerCamelCase , no_split_module_classes=_lowerCamelCase , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
_lowerCAmelCase : Tuple = True
_lowerCAmelCase : Union[str, Any] = any(x in list(device_map.values() ) for x in ["cpu", "disk"] )
load_checkpoint_in_model(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , dtype=bnb_quantization_config.torch_dtype , offload_folder=_lowerCamelCase , offload_state_dict=_lowerCamelCase , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(_lowerCamelCase , device_map=_lowerCamelCase , offload_dir=_lowerCamelCase )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None ):
'''simple docstring'''
if device_map is None:
if torch.cuda.is_available():
_lowerCAmelCase : int = {"": torch.cuda.current_device()}
else:
raise RuntimeError("No GPU found. A GPU is needed for quantization." )
logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`." )
if isinstance(_lowerCamelCase , _lowerCamelCase ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
"'sequential'." )
_lowerCAmelCase : Union[str, Any] = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
_lowerCAmelCase : int = {}
_lowerCAmelCase : List[str] = special_dtypes
_lowerCAmelCase : Any = no_split_module_classes
_lowerCAmelCase : Optional[int] = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
_lowerCAmelCase : Dict = get_balanced_memory(
_lowerCamelCase , low_zero=(device_map == "balanced_low_0") , max_memory=_lowerCamelCase , **_lowerCamelCase , )
_lowerCAmelCase : int = max_memory
_lowerCAmelCase : List[str] = infer_auto_device_map(_lowerCamelCase , **_lowerCamelCase )
if isinstance(_lowerCamelCase , _lowerCamelCase ):
# check if don't have any quantized module on the cpu
_lowerCAmelCase : Union[str, Any] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
_lowerCAmelCase : Dict = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n " )
else:
logger.info(
"Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit" )
del device_map_without_some_modules
return device_map
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None ):
'''simple docstring'''
if modules_to_not_convert is None:
_lowerCAmelCase : Tuple = []
_lowerCAmelCase , _lowerCAmelCase : Any = _replace_with_bnb_layers(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if not has_been_replaced:
logger.warning(
"You are loading your model in 8bit or 4bit but no linear modules were found in your model."
" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
" Please double check your model architecture, or submit an issue on github if you think this is"
" a bug." )
return model
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = False
for name, module in model.named_children():
if current_key_name is None:
_lowerCAmelCase : Dict = []
current_key_name.append(_lowerCamelCase )
if isinstance(_lowerCamelCase , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
_lowerCAmelCase : Tuple = ".".join(_lowerCamelCase )
_lowerCAmelCase : Dict = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
_lowerCAmelCase : Dict = False
break
if proceed:
                # Load bnb module with empty weight and replace the `nn.Linear` module
if bnb_quantization_config.load_in_abit:
_lowerCAmelCase : Any = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=_lowerCamelCase , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
_lowerCAmelCase : List[str] = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError("load_in_8bit and load_in_4bit can't be both False" )
_lowerCAmelCase : Any = module.weight.data
if module.bias is not None:
_lowerCAmelCase : Optional[int] = module.bias.data
bnb_module.requires_grad_(_lowerCamelCase )
setattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : List[Any] = True
if len(list(module.children() ) ) > 0:
_lowerCAmelCase , _lowerCAmelCase : int = _replace_with_bnb_layers(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : str = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def A ( _lowerCamelCase ):
'''simple docstring'''
with init_empty_weights():
        _lowerCAmelCase : str = deepcopy(_lowerCamelCase ) # this has 0 cost since it is done inside the `init_empty_weights` context manager
_lowerCAmelCase : Union[str, Any] = find_tied_parameters(_lowerCamelCase )
# For compatibility with Accelerate < 0.18
if isinstance(_lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : Any = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
_lowerCAmelCase : Any = sum(_lowerCamelCase , [] )
_lowerCAmelCase : Tuple = len(_lowerCamelCase ) > 0
# Check if it is a base model
_lowerCAmelCase : Any = False
if hasattr(_lowerCamelCase , "base_model_prefix" ):
_lowerCAmelCase : List[str] = not hasattr(_lowerCamelCase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
_lowerCAmelCase : Any = list(model.named_children() )
_lowerCAmelCase : Dict = [list_modules[-1][0]]
# add last module together with tied weights
_lowerCAmelCase : Dict = set(_lowerCamelCase ) - set(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = list(set(_lowerCamelCase ) ) + list(_lowerCamelCase )
# remove ".weight" from the keys
_lowerCAmelCase : Any = [".weight", ".bias"]
_lowerCAmelCase : Optional[int] = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
_lowerCAmelCase : Any = name.replace(_lowerCamelCase , "" )
filtered_module_names.append(_lowerCamelCase )
return filtered_module_names
def A ( _lowerCamelCase ):
'''simple docstring'''
for m in model.modules():
if isinstance(_lowerCamelCase , bnb.nn.Linearabit ):
return True
return False
def A ( _lowerCamelCase ):
'''simple docstring'''
return next(parameter.parameters() ).device
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if fpaa_statistics is None:
set_module_tensor_to_device(_lowerCamelCase , _lowerCamelCase , 0 , dtype=_lowerCamelCase , value=_lowerCamelCase )
_lowerCAmelCase : Dict = param_name
_lowerCAmelCase : Optional[Any] = model
if "." in tensor_name:
_lowerCAmelCase : List[Any] = tensor_name.split("." )
for split in splits[:-1]:
_lowerCAmelCase : Any = getattr(_lowerCamelCase , _lowerCamelCase )
if new_module is None:
raise ValueError(F"{module} has no attribute {split}." )
_lowerCAmelCase : List[Any] = new_module
_lowerCAmelCase : Union[str, Any] = splits[-1]
# offload weights
_lowerCAmelCase : List[Any] = False
offload_weight(module._parameters[tensor_name] , _lowerCamelCase , _lowerCamelCase , index=_lowerCamelCase )
if hasattr(module._parameters[tensor_name] , "SCB" ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace("weight" , "SCB" ) , _lowerCamelCase , index=_lowerCamelCase , )
else:
offload_weight(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , index=_lowerCamelCase )
offload_weight(_lowerCamelCase , param_name.replace("weight" , "SCB" ) , _lowerCamelCase , index=_lowerCamelCase )
set_module_tensor_to_device(_lowerCamelCase , _lowerCamelCase , "meta" , dtype=_lowerCamelCase , value=torch.empty(*param.size() ) )
| 300
|
class UpperCAmelCase_ :
def __init__( self):
'''simple docstring'''
_lowerCAmelCase : Dict = 0
_lowerCAmelCase : Optional[int] = 0
_lowerCAmelCase : Tuple = {}
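        # adjacency maps each vertex to {neighbour: edge weight}; edges are
        # mirrored in both directions, so the graph is undirected.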
def snake_case__ ( self, __a):
'''simple docstring'''
if vertex not in self.adjacency:
_lowerCAmelCase : List[Any] = {}
self.num_vertices += 1
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
self.add_vertex(__a)
self.add_vertex(__a)
if head == tail:
return
_lowerCAmelCase : Dict = weight
_lowerCAmelCase : Dict = weight
def snake_case__ ( self):
'''simple docstring'''
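        # Nudge equal weights apart so that every edge weight is distinct;
        # Boruvka's algorithm below relies on unique weights to break ties.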
_lowerCAmelCase : Any = self.get_edges()
for edge in edges:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Dict = edge
edges.remove((tail, head, weight))
for i in range(len(__a)):
_lowerCAmelCase : Optional[int] = list(edges[i])
        edges.sort(key=lambda __a: __a[2])
for i in range(len(__a) - 1):
if edges[i][2] >= edges[i + 1][2]:
_lowerCAmelCase : Tuple = edges[i][2] + 1
for edge in edges:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Dict = edge
_lowerCAmelCase : Union[str, Any] = weight
_lowerCAmelCase : Optional[int] = weight
def __str__( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = ""
for tail in self.adjacency:
for head in self.adjacency[tail]:
_lowerCAmelCase : List[Any] = self.adjacency[head][tail]
string += f"{head} -> {tail} == {weight}\n"
return string.rstrip("\n")
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]))
return output
def snake_case__ ( self):
'''simple docstring'''
return self.adjacency.keys()
@staticmethod
def snake_case__ ( __a=None, __a=None):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = Graph()
if vertices is None:
_lowerCAmelCase : Any = []
if edges is None:
_lowerCAmelCase : Any = []
for vertex in vertices:
g.add_vertex(__a)
for edge in edges:
g.add_edge(*__a)
return g
class UpperCAmelCase_ :
def __init__( self):
'''simple docstring'''
_lowerCAmelCase : Dict = {}
_lowerCAmelCase : List[Any] = {}
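        # Disjoint-set (union-find) structure: find() compresses paths and
        # union() merges trees by rank.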
def __len__( self):
'''simple docstring'''
return len(self.parent)
def snake_case__ ( self, __a):
'''simple docstring'''
if item in self.parent:
return self.find(__a)
_lowerCAmelCase : Optional[int] = item
_lowerCAmelCase : Any = 0
return item
def snake_case__ ( self, __a):
'''simple docstring'''
if item not in self.parent:
return self.make_set(__a)
if item != self.parent[item]:
_lowerCAmelCase : Any = self.find(self.parent[item])
return self.parent[item]
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.find(__a)
_lowerCAmelCase : List[str] = self.find(__a)
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
_lowerCAmelCase : Any = roota
return roota
if self.rank[roota] < self.rank[roota]:
_lowerCAmelCase : List[Any] = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
_lowerCAmelCase : int = roota
return roota
return None
@staticmethod
def snake_case__ ( __a):
'''simple docstring'''
_lowerCAmelCase : Tuple = graph.num_vertices
_lowerCAmelCase : Optional[int] = Graph.UnionFind()
_lowerCAmelCase : str = []
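        # Boruvka's MST algorithm: while more than one component remains,
        # pick the cheapest edge leaving each component, add it, and merge.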
while num_components > 1:
_lowerCAmelCase : List[str] = {}
for vertex in graph.get_vertices():
_lowerCAmelCase : Optional[Any] = -1
_lowerCAmelCase : Union[str, Any] = graph.get_edges()
for edge in edges:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : str = edge
edges.remove((tail, head, weight))
for edge in edges:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = edge
_lowerCAmelCase : Dict = union_find.find(__a)
_lowerCAmelCase : Optional[Any] = union_find.find(__a)
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_lowerCAmelCase : Union[str, Any] = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_lowerCAmelCase : Tuple = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = cheap_edge[vertex]
if union_find.find(__a) != union_find.find(__a):
union_find.union(__a, __a)
mst_edges.append(cheap_edge[vertex])
_lowerCAmelCase : Any = num_components - 1
_lowerCAmelCase : List[str] = Graph.build(edges=__a)
return mst
| 300
| 1
|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
_snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCAmelCase_ ( a):
def __init__( self, __a, __a, __a, __a, __a, __a, __a, ):
'''simple docstring'''
super().__init__()
self.register_modules(
vae=__a, text_encoder=__a, tokenizer=__a, unet=__a, scheduler=__a, safety_checker=__a, feature_extractor=__a, )
def snake_case__ ( self, __a = "auto"):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_lowerCAmelCase : Optional[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__a)
def snake_case__ ( self):
'''simple docstring'''
self.enable_attention_slicing(__a)
@torch.no_grad()
def __call__( self, __a, __a = 512, __a = 512, __a = 50, __a = 7.5, __a = None, __a = 1, __a = 0.0, __a = None, __a = None, __a = "pil", __a = True, __a = None, __a = 1, __a = None, **__a, ):
'''simple docstring'''
if isinstance(__a, __a):
_lowerCAmelCase : List[str] = 1
elif isinstance(__a, __a):
_lowerCAmelCase : List[Any] = len(__a)
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(__a)}")
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__a, __a) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(__a)}.")
# get prompt text embeddings
_lowerCAmelCase : List[str] = self.tokenizer(
__a, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt", )
_lowerCAmelCase : Any = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
_lowerCAmelCase : str = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {self.tokenizer.model_max_length} tokens: {removed_text}")
_lowerCAmelCase : Dict = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
_lowerCAmelCase : Union[str, Any] = self.text_encoder(text_input_ids.to(self.device))[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : int = text_embeddings.shape
_lowerCAmelCase : Optional[int] = text_embeddings.repeat(1, __a, 1)
_lowerCAmelCase : Optional[int] = text_embeddings.view(bs_embed * num_images_per_prompt, __a, -1)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_lowerCAmelCase : Any = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_lowerCAmelCase : List[str]
if negative_prompt is None:
_lowerCAmelCase : Tuple = [""]
elif type(__a) is not type(__a):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(__a)} !="
f" {type(__a)}.")
elif isinstance(__a, __a):
_lowerCAmelCase : Optional[int] = [negative_prompt]
elif batch_size != len(__a):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(__a)}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`.")
else:
_lowerCAmelCase : Union[str, Any] = negative_prompt
_lowerCAmelCase : List[str] = text_input_ids.shape[-1]
_lowerCAmelCase : int = self.tokenizer(
__a, padding="max_length", max_length=__a, truncation=__a, return_tensors="pt", )
_lowerCAmelCase : Tuple = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
_lowerCAmelCase : Optional[int] = uncond_embeddings.shape[1]
_lowerCAmelCase : str = uncond_embeddings.repeat(__a, __a, 1)
_lowerCAmelCase : int = uncond_embeddings.view(batch_size * num_images_per_prompt, __a, -1)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowerCAmelCase : Union[str, Any] = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_lowerCAmelCase : Union[str, Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
_lowerCAmelCase : str = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
_lowerCAmelCase : Any = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
_lowerCAmelCase : Optional[Any] = torch.randn(
__a, generator=__a, device="cpu", dtype=__a).to(self.device)
_lowerCAmelCase : List[Any] = torch.randn(__a, generator=__a, device="cpu", dtype=__a).to(
self.device)
else:
_lowerCAmelCase : List[Any] = torch.randn(
__a, generator=__a, device=self.device, dtype=__a)
_lowerCAmelCase : List[str] = torch.randn(__a, generator=__a, device=self.device, dtype=__a)
else:
if latents_reference.shape != latents_shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
_lowerCAmelCase : Optional[Any] = latents_reference.to(self.device)
_lowerCAmelCase : str = latents.to(self.device)
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
_lowerCAmelCase : Optional[Any] = (latents_shape[3] - latents_shape_reference[3]) // 2
_lowerCAmelCase : Union[str, Any] = (latents_shape[2] - latents_shape_reference[2]) // 2
_lowerCAmelCase : Dict = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
_lowerCAmelCase : int = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
_lowerCAmelCase : Union[str, Any] = 0 if dx < 0 else dx
_lowerCAmelCase : Tuple = 0 if dy < 0 else dy
_lowerCAmelCase : str = max(-dx, 0)
_lowerCAmelCase : Dict = max(-dy, 0)
# import pdb
# pdb.set_trace()
_lowerCAmelCase : Tuple = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(__a)
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
_lowerCAmelCase : Dict = self.scheduler.timesteps.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
_lowerCAmelCase : Tuple = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_lowerCAmelCase : Union[str, Any] = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
_lowerCAmelCase : Optional[int] = {}
if accepts_eta:
_lowerCAmelCase : Dict = eta
for i, t in enumerate(self.progress_bar(__a)):
# expand the latents if we are doing classifier free guidance
_lowerCAmelCase : List[Any] = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
_lowerCAmelCase : Union[str, Any] = self.scheduler.scale_model_input(__a, __a)
# predict the noise residual
_lowerCAmelCase : Union[str, Any] = self.unet(__a, __a, encoder_hidden_states=__a).sample
# perform guidance
if do_classifier_free_guidance:
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = noise_pred.chunk(2)
_lowerCAmelCase : str = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
_lowerCAmelCase : Any = self.scheduler.step(__a, __a, __a, **__a).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__a, __a, __a)
_lowerCAmelCase : str = 1 / 0.18_215 * latents
_lowerCAmelCase : Dict = self.vae.decode(__a).sample
_lowerCAmelCase : Union[str, Any] = (image / 2 + 0.5).clamp(0, 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
_lowerCAmelCase : Dict = image.cpu().permute(0, 2, 3, 1).float().numpy()
if self.safety_checker is not None:
_lowerCAmelCase : Any = self.feature_extractor(self.numpy_to_pil(__a), return_tensors="pt").to(
self.device)
_lowerCAmelCase , _lowerCAmelCase : int = self.safety_checker(
images=__a, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype))
else:
_lowerCAmelCase : str = None
if output_type == "pil":
_lowerCAmelCase : Optional[Any] = self.numpy_to_pil(__a)
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=__a, nsfw_content_detected=__a)
| 300
|
_snake_case = 8.3144598
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
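    # Root-mean-square speed of an ideal-gas molecule: v_rms = sqrt(3 * R * T / M),
    # with R in J/(mol*K), T in kelvin and M (molar mass) in kg/mol, giving m/s.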
if temperature < 0:
raise Exception("Temperature cannot be less than 0 K" )
if molar_mass <= 0:
raise Exception("Molar mass cannot be less than or equal to 0 kg/mol" )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
_snake_case = 300
    _snake_case = 0.028  # molar mass of N2 in kg/mol, so that vrms comes out in m/s
_snake_case = rms_speed_of_molecule(temperature, molar_mass)
print(f'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
| 300
| 1
|
from PIL import Image
def A ( _lowerCamelCase ):
'''simple docstring'''
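    # Binarize a grayscale image: pixels brighter than the global mean become
    # 255 (white), everything else becomes 0 (black).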
_lowerCAmelCase , _lowerCAmelCase : int = image.size
_lowerCAmelCase : List[str] = 0
_lowerCAmelCase : str = image.load()
for i in range(_lowerCamelCase ):
for j in range(_lowerCamelCase ):
_lowerCAmelCase : Tuple = pixels[j, i]
mean += pixel
mean //= width * height
for j in range(_lowerCamelCase ):
for i in range(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = 255 if pixels[i, j] > mean else 0
return image
if __name__ == "__main__":
_snake_case = mean_threshold(Image.open("path_to_image").convert("L"))
image.save("output_image_path")
| 300
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'wav2vec2'
def __init__( self, __a=32, __a=768, __a=12, __a=12, __a=3072, __a="gelu", __a=0.1, __a=0.1, __a=0.1, __a=0.0, __a=0.0, __a=0.1, __a=0.1, __a=0.02, __a=1E-5, __a="group", __a="gelu", __a=(512, 512, 512, 512, 512, 512, 512), __a=(5, 2, 2, 2, 2, 2, 2), __a=(10, 3, 3, 3, 3, 2, 2), __a=False, __a=128, __a=16, __a=False, __a=True, __a=0.05, __a=10, __a=2, __a=0.0, __a=10, __a=0, __a=320, __a=2, __a=0.1, __a=100, __a=256, __a=256, __a=0.1, __a="sum", __a=False, __a=False, __a=256, __a=(512, 512, 512, 512, 1500), __a=(5, 3, 3, 1, 1), __a=(1, 2, 3, 1, 1), __a=512, __a=0, __a=1, __a=2, __a=False, __a=3, __a=2, __a=3, __a=None, __a=None, **__a, ):
'''simple docstring'''
super().__init__(**__a, pad_token_id=__a, bos_token_id=__a, eos_token_id=__a)
_lowerCAmelCase : str = hidden_size
_lowerCAmelCase : Optional[int] = feat_extract_norm
_lowerCAmelCase : Dict = feat_extract_activation
_lowerCAmelCase : Any = list(__a)
_lowerCAmelCase : List[str] = list(__a)
_lowerCAmelCase : List[Any] = list(__a)
_lowerCAmelCase : List[str] = conv_bias
_lowerCAmelCase : Optional[Any] = num_conv_pos_embeddings
_lowerCAmelCase : Dict = num_conv_pos_embedding_groups
_lowerCAmelCase : Any = len(self.conv_dim)
_lowerCAmelCase : Union[str, Any] = num_hidden_layers
_lowerCAmelCase : int = intermediate_size
_lowerCAmelCase : List[Any] = hidden_act
_lowerCAmelCase : Any = num_attention_heads
_lowerCAmelCase : List[str] = hidden_dropout
_lowerCAmelCase : Tuple = attention_dropout
_lowerCAmelCase : List[Any] = activation_dropout
_lowerCAmelCase : Dict = feat_proj_dropout
_lowerCAmelCase : Optional[int] = final_dropout
_lowerCAmelCase : Dict = layerdrop
_lowerCAmelCase : Tuple = layer_norm_eps
_lowerCAmelCase : Tuple = initializer_range
_lowerCAmelCase : int = vocab_size
_lowerCAmelCase : Tuple = do_stable_layer_norm
_lowerCAmelCase : Any = use_weighted_layer_sum
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
f" `len(config.conv_kernel) = {len(self.conv_kernel)}`.")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowerCAmelCase : Optional[int] = apply_spec_augment
_lowerCAmelCase : int = mask_time_prob
_lowerCAmelCase : str = mask_time_length
_lowerCAmelCase : int = mask_time_min_masks
_lowerCAmelCase : List[Any] = mask_feature_prob
_lowerCAmelCase : List[Any] = mask_feature_length
_lowerCAmelCase : List[Any] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_lowerCAmelCase : int = num_codevectors_per_group
_lowerCAmelCase : List[str] = num_codevector_groups
_lowerCAmelCase : List[Any] = contrastive_logits_temperature
_lowerCAmelCase : int = feat_quantizer_dropout
_lowerCAmelCase : Any = num_negatives
_lowerCAmelCase : Dict = codevector_dim
_lowerCAmelCase : Any = proj_codevector_dim
_lowerCAmelCase : Optional[int] = diversity_loss_weight
# ctc loss
_lowerCAmelCase : Optional[Any] = ctc_loss_reduction
_lowerCAmelCase : str = ctc_zero_infinity
# adapter
_lowerCAmelCase : Optional[Any] = add_adapter
_lowerCAmelCase : Tuple = adapter_kernel_size
_lowerCAmelCase : str = adapter_stride
_lowerCAmelCase : List[Any] = num_adapter_layers
_lowerCAmelCase : str = output_hidden_size or hidden_size
_lowerCAmelCase : List[str] = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_lowerCAmelCase : List[str] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_lowerCAmelCase : int = list(__a)
_lowerCAmelCase : Dict = list(__a)
_lowerCAmelCase : Dict = list(__a)
_lowerCAmelCase : Tuple = xvector_output_dim
@property
def snake_case__ ( self):
'''simple docstring'''
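        # Total hop length of the convolutional feature encoder: the product of
        # all conv strides (5*2*2*2*2*2*2 = 320 input samples per frame for the
        # default config).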
return functools.reduce(operator.mul, self.conv_stride, 1)
| 300
| 1
|
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
_snake_case = logging.get_logger(__name__)
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
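    # Normalize both embedding sets to unit length, then take all pairwise
    # dot products, i.e. a cosine-similarity matrix.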
_lowerCAmelCase : Tuple = nn.functional.normalize(_lowerCamelCase )
_lowerCAmelCase : List[str] = nn.functional.normalize(_lowerCamelCase )
return torch.mm(_lowerCamelCase , normalized_text_embeds.t() )
class UpperCAmelCase_ ( a):
lowerCamelCase__ = CLIPConfig
lowerCamelCase__ = ['CLIPEncoderLayer']
def __init__( self, __a):
'''simple docstring'''
super().__init__(__a)
_lowerCAmelCase : Any = CLIPVisionModel(config.vision_config)
_lowerCAmelCase : Optional[Any] = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=__a)
_lowerCAmelCase : Any = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=__a)
_lowerCAmelCase : str = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=__a)
_lowerCAmelCase : Any = nn.Parameter(torch.ones(17), requires_grad=__a)
_lowerCAmelCase : Optional[Any] = nn.Parameter(torch.ones(3), requires_grad=__a)
@torch.no_grad()
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.vision_model(__a)[1] # pooled_output
_lowerCAmelCase : Optional[int] = self.visual_projection(__a)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
_lowerCAmelCase : Dict = cosine_distance(__a, self.special_care_embeds).cpu().float().numpy()
_lowerCAmelCase : List[str] = cosine_distance(__a, self.concept_embeds).cpu().float().numpy()
_lowerCAmelCase : List[str] = []
_lowerCAmelCase : Tuple = image_embeds.shape[0]
for i in range(__a):
_lowerCAmelCase : List[str] = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}
            # increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
_lowerCAmelCase : Any = 0.0
for concept_idx in range(len(special_cos_dist[0])):
_lowerCAmelCase : str = special_cos_dist[i][concept_idx]
_lowerCAmelCase : int = self.special_care_embeds_weights[concept_idx].item()
_lowerCAmelCase : List[Any] = round(concept_cos - concept_threshold + adjustment, 3)
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
_lowerCAmelCase : Union[str, Any] = 0.01
for concept_idx in range(len(cos_dist[0])):
_lowerCAmelCase : Optional[Any] = cos_dist[i][concept_idx]
_lowerCAmelCase : int = self.concept_embeds_weights[concept_idx].item()
_lowerCAmelCase : Union[str, Any] = round(concept_cos - concept_threshold + adjustment, 3)
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(__a)
result.append(__a)
_lowerCAmelCase : Optional[int] = [len(res["bad_concepts"]) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.vision_model(__a)[1] # pooled_output
_lowerCAmelCase : List[Any] = self.visual_projection(__a)
_lowerCAmelCase : int = cosine_distance(__a, self.special_care_embeds)
_lowerCAmelCase : Dict = cosine_distance(__a, self.concept_embeds)
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
_lowerCAmelCase : Optional[Any] = 0.0
_lowerCAmelCase : str = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
_lowerCAmelCase : List[str] = torch.any(special_scores > 0, dim=1)
_lowerCAmelCase : str = special_care * 0.01
_lowerCAmelCase : int = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])
_lowerCAmelCase : str = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
_lowerCAmelCase : str = torch.any(concept_scores > 0, dim=1)
return images, has_nsfw_concepts
| 300
|
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
_snake_case = False
try:
_snake_case = _is_package_available("google.colab")
except ModuleNotFoundError:
pass
@input.register
class UpperCAmelCase_ :
def __init__( self, __a = None, __a = []):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = 0
_lowerCAmelCase : Optional[int] = choices
_lowerCAmelCase : Tuple = prompt
if sys.platform == "win32":
_lowerCAmelCase : Optional[Any] = "*"
else:
_lowerCAmelCase : Dict = "➔ "
def snake_case__ ( self, __a, __a = ""):
'''simple docstring'''
if sys.platform != "win32":
writeColor(self.choices[index], 32, __a)
else:
forceWrite(self.choices[index], __a)
def snake_case__ ( self, __a):
'''simple docstring'''
if index == self.position:
forceWrite(f" {self.arrow_char} ")
self.write_choice(__a)
else:
forceWrite(f" {self.choices[index]}")
reset_cursor()
def snake_case__ ( self, __a, __a = 1):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(__a)
move_cursor(__a, direction.name)
self.print_choice(self.position)
@input.mark(KEYMAP["up"])
def snake_case__ ( self):
'''simple docstring'''
self.move_direction(Direction.UP)
@input.mark(KEYMAP["down"])
def snake_case__ ( self):
'''simple docstring'''
self.move_direction(Direction.DOWN)
@input.mark(KEYMAP["newline"])
def snake_case__ ( self):
'''simple docstring'''
move_cursor(len(self.choices) - self.position, "DOWN")
return self.position
@input.mark(KEYMAP["interrupt"])
def snake_case__ ( self):
'''simple docstring'''
move_cursor(len(self.choices) - self.position, "DOWN")
raise KeyboardInterrupt
    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = int(chr(self.current_selection))
_lowerCAmelCase : List[str] = index - self.position
if index == self.position:
return
if index < len(self.choices):
if self.position > index:
self.move_direction(Direction.UP, -movement)
elif self.position < index:
self.move_direction(Direction.DOWN, __a)
else:
return
else:
return
def snake_case__ ( self, __a = 0):
'''simple docstring'''
if self.prompt:
linebreak()
forceWrite(self.prompt, "\n")
if in_colab:
forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
else:
forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
_lowerCAmelCase : List[Any] = default_choice
for i in range(len(self.choices)):
self.print_choice(__a)
forceWrite("\n")
move_cursor(len(self.choices) - self.position, "UP")
with cursor.hide():
while True:
if in_colab:
try:
_lowerCAmelCase : str = int(builtins.input())
except ValueError:
_lowerCAmelCase : List[Any] = default_choice
else:
_lowerCAmelCase : List[str] = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices) + 1):
move_cursor(1, "UP")
clear_line()
self.write_choice(__a, "\n")
return choice
| 300
| 1
|
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
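    # Speed of sound in a fluid (Newton-Laplace): c = sqrt(bulk_modulus / density).
    # For water (bulk modulus ~2.2e9 Pa, density ~1000 kg/m^3) this gives ~1480 m/s.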
if density <= 0:
raise ValueError("Impossible fluid density" )
if bulk_modulus <= 0:
raise ValueError("Impossible bulk modulus" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
| 300
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_snake_case = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["BeitFeatureExtractor"]
_snake_case = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BeitForImageClassification",
"BeitForMaskedImageModeling",
"BeitForSemanticSegmentation",
"BeitModel",
"BeitPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"FlaxBeitForImageClassification",
"FlaxBeitForMaskedImageModeling",
"FlaxBeitModel",
"FlaxBeitPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 300
| 1
|
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 42
@flax_register_to_config
class UpperCAmelCase_ ( nn.Module , a , a):
lowerCamelCase__ = 32
lowerCamelCase__ = 4
lowerCamelCase__ = 4
lowerCamelCase__ = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
lowerCamelCase__ = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
lowerCamelCase__ = False
lowerCamelCase__ = (320, 640, 1280, 1280)
lowerCamelCase__ = 2
lowerCamelCase__ = 8
lowerCamelCase__ = None
lowerCamelCase__ = 1280
lowerCamelCase__ = 0.0
lowerCamelCase__ = False
lowerCamelCase__ = jnp.floataa
lowerCamelCase__ = True
lowerCamelCase__ = 0
lowerCamelCase__ = False
def snake_case__ ( self, __a):
'''simple docstring'''
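        # Build dummy sample / timestep / encoder-hidden-state tensors and run
        # init() once to materialize the parameter PyTree.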
_lowerCAmelCase : Optional[int] = (1, self.in_channels, self.sample_size, self.sample_size)
_lowerCAmelCase : str = jnp.zeros(__a, dtype=jnp.floataa)
_lowerCAmelCase : Optional[int] = jnp.ones((1,), dtype=jnp.intaa)
_lowerCAmelCase : int = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.floataa)
_lowerCAmelCase , _lowerCAmelCase : int = jax.random.split(__a)
_lowerCAmelCase : Tuple = {"params": params_rng, "dropout": dropout_rng}
return self.init(__a, __a, __a, __a)["params"]
    def setup( self):
        '''simple docstring'''
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4
        if self.num_attention_heads is not None:
            raise ValueError(
                "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.")
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim
        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift)
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)
        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)
        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)
        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlockaD(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            else:
                down_block = FlaxDownBlockaD(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype, )
            down_blocks.append(down_block)
        self.down_blocks = down_blocks
        # mid
        self.mid_block = FlaxUNetMidBlockaDCrossAttn(
            in_channels=block_out_channels[-1], dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_num_attention_heads = list(reversed(num_attention_heads))
        only_cross_attention = list(reversed(only_cross_attention))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
            is_final_block = i == len(block_out_channels) - 1
            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlockaD(
                    in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, num_attention_heads=reversed_num_attention_heads[i], add_upsample=not is_final_block, dropout=self.dropout, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            else:
                up_block = FlaxUpBlockaD(
                    in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, add_upsample=not is_final_block, dropout=self.dropout, dtype=self.dtype, )
            up_blocks.append(up_block)
            prev_output_channel = output_channel
        self.up_blocks = up_blocks
        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1E-5)
        self.conv_out = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
    def __call__( self, sample, timesteps, encoder_hidden_states, down_block_additional_residuals=None, mid_block_additional_residual=None, return_dict = True, train = False, ):
        '''simple docstring'''
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)
        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)
        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)
        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlockaD):
                sample , res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample , res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples
        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()
            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples, down_block_additional_residuals):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)
            down_block_res_samples = new_down_block_res_samples
        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual
        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block, FlaxCrossAttnUpBlockaD):
                sample = up_block(
                    sample, temb=t_emb, encoder_hidden_states=encoder_hidden_states, res_hidden_states_tuple=res_samples, deterministic=not train, )
            else:
                sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)
        # 6. post-process
        sample = self.conv_norm_out(sample)
        sample = nn.silu(sample)
        sample = self.conv_out(sample)
        sample = jnp.transpose(sample, (0, 3, 1, 2))
        if not return_dict:
            return (sample,)
        return FlaxUNetaDConditionOutput(sample=sample)
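# --- Usage sketch (added for illustration; not part of the original file) ---
# A minimal smoke test for the module above (a Flax UNet2D condition model).
# Shapes follow the class defaults (sample_size=32, in_channels=4,
# out_channels=4, cross_attention_dim=1280); the helper name and the 77-token
# sequence length are illustrative assumptions.
import jax.numpy as jnp

def unet_smoke_test(unet, params):
    sample = jnp.zeros((1, 4, 32, 32), dtype=jnp.float32)       # dummy latents
    timesteps = jnp.ones((1,), dtype=jnp.int32)                 # one diffusion step per sample
    encoder_hidden_states = jnp.zeros((1, 77, 1280), dtype=jnp.float32)
    out = unet.apply({"params": params}, sample, timesteps, encoder_hidden_states)
    return out.sample.shape  # expected to match the input latents: (1, 4, 32, 32)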
| 300
|
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__( self, row, column, default_value: Any = 0):
        '''simple docstring'''
        self.row , self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]
    def __str__( self):
        '''simple docstring'''
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"
        # Make string and return
        def single_line(row_vector) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line
        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s
    def __repr__( self):
        '''simple docstring'''
        return str(self)
    def validate_indicies( self, loc):
        '''simple docstring'''
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True
    def __getitem__( self, loc):
        '''simple docstring'''
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]
    def __setitem__( self, loc, value):
        '''simple docstring'''
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value
    def __add__( self, another):
        '''simple docstring'''
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result
    def __neg__( self):
        '''simple docstring'''
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result
    def __sub__( self, another):
        '''simple docstring'''
        return self + (-another)
    def __mul__( self, another):
        '''simple docstring'''
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)
    def transpose( self):
        '''simple docstring'''
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result
    def sherman_morrison( self, u, v):
        '''simple docstring'''
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
    def test1() -> None:
        '''simple docstring'''
        ainv = Matrix(3 , 3 , 0 )
        for i in range(3 ):
            ainv[i, i] = 1
        print(F"a^(-1) is {ainv}" )
        # u, v
        u = Matrix(3 , 1 , 0 )
        u[0, 0] , u[1, 0] , u[2, 0] = 1, 2, -3
        v = Matrix(3 , 1 , 0 )
        v[0, 0] , v[1, 0] , v[2, 0] = 4, -2, 5
        print(F"u is {u}" )
        print(F"v is {v}" )
        print(F"uv^T is {u * v.transpose()}" )
        # Sherman Morrison
        print(F"(a + uv^T)^(-1) is {ainv.sherman_morrison(u , v )}" )

    def test2() -> None:
        '''simple docstring'''
        import doctest
        doctest.testmod()

    test2()
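# --- Verification sketch (added for illustration; not part of the original file) ---
# Cross-checks the Sherman-Morrison identity the class implements,
#   (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u),
# against a direct NumPy inverse. NumPy is an extra dependency of this sketch only.
import numpy as np

def sherman_morrison_numpy_check() -> bool:
    rng = np.random.default_rng(0)
    a = np.eye(3) + 0.1 * rng.standard_normal((3, 3))  # well-conditioned test matrix
    u = rng.standard_normal((3, 1))
    v = rng.standard_normal((3, 1))
    ainv = np.linalg.inv(a)
    updated = ainv - (ainv @ u @ v.T @ ainv) / (1.0 + float(v.T @ ainv @ u))
    return np.allclose(updated, np.linalg.inv(a + u @ v.T))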
| 300
| 1
|
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "spiece.model"}
_snake_case = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
}
}
# TODO(PVP) - this should be removed in Transformers v5
_snake_case = {
"t5-small": 512,
"t5-base": 512,
"t5-large": 512,
"t5-3b": 512,
"t5-11b": 512,
}
_snake_case = "▁"
class UpperCAmelCase_ ( a):
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = ['input_ids', 'attention_mask']
def __init__( self, __a, __a="</s>", __a="<unk>", __a="<pad>", __a=100, __a=None, __a = None, __a=True, **__a, ):
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
_lowerCAmelCase : Union[str, Any] = [f"<extra_id_{i}>" for i in range(__a)]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
_lowerCAmelCase : Dict = len(set(filter(lambda __a: bool("extra_id" in str(__a)), __a)))
if extra_tokens != extra_ids:
raise ValueError(
f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
" tokens")
if legacy:
logger.warning_once(
f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
" read the related pull request available at https://github.com/huggingface/transformers/pull/24565")
_lowerCAmelCase : List[Any] = legacy
_lowerCAmelCase : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=__a, unk_token=__a, pad_token=__a, extra_ids=__a, additional_special_tokens=__a, sp_model_kwargs=self.sp_model_kwargs, legacy=__a, **__a, )
_lowerCAmelCase : str = vocab_file
_lowerCAmelCase : Union[str, Any] = extra_ids
_lowerCAmelCase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(__a)
@staticmethod
def snake_case__ ( __a, __a, __a):
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
_lowerCAmelCase : Dict = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"This tokenizer was incorrectly instantiated with a model max length of"
f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
f" {pretrained_model_name_or_path} automatically truncating your input to"
f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
" instantiate this tokenizer with `model_max_length` set to your preferred value.", __a, )
return max_model_length
@property
def snake_case__ ( self):
'''simple docstring'''
return self.sp_model.get_piece_size() + self._extra_ids
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = {self.convert_ids_to_tokens(__a): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def snake_case__ ( self, __a, __a = None, __a = False):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a, token_ids_a=__a, already_has_special_tokens=__a)
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(__a)) + [1]
return ([0] * len(__a)) + [1] + ([0] * len(__a)) + [1]
def snake_case__ ( self):
'''simple docstring'''
return list(
set(filter(lambda __a: bool(re.search(R"<extra_id_\d+>", __a)) is not None, self.additional_special_tokens)))
def snake_case__ ( self):
'''simple docstring'''
return [self._convert_token_to_id(__a) for token in self.get_sentinel_tokens()]
def snake_case__ ( self, __a):
'''simple docstring'''
if len(__a) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
" eos tokens being added.")
return token_ids
else:
return token_ids + [self.eos_token_id]
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
_lowerCAmelCase : Dict = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos) * [0]
return len(token_ids_a + eos + token_ids_a + eos) * [0]
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self._add_eos_if_not_present(__a)
if token_ids_a is None:
return token_ids_a
else:
_lowerCAmelCase : List[str] = self._add_eos_if_not_present(__a)
return token_ids_a + token_ids_a
def __getstate__( self):
'''simple docstring'''
_lowerCAmelCase : Dict = self.__dict__.copy()
_lowerCAmelCase : Any = None
return state
def __setstate__( self, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = d
# for backward compatibility
if not hasattr(self, "sp_model_kwargs"):
_lowerCAmelCase : Any = {}
_lowerCAmelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def snake_case__ ( self, __a, **__a):
'''simple docstring'''
if not self.legacy:
_lowerCAmelCase : Tuple = SPIECE_UNDERLINE + text.replace(__a, " ")
return super().tokenize(__a, **__a)
def snake_case__ ( self, __a, **__a):
'''simple docstring'''
if not self.legacy:
_lowerCAmelCase : Union[str, Any] = text.startswith(__a)
if is_first:
_lowerCAmelCase : Dict = text[1:]
_lowerCAmelCase : List[Any] = self.sp_model.encode(__a, out_type=__a)
if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(__a):
_lowerCAmelCase : List[str] = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
return tokens
def snake_case__ ( self, __a):
'''simple docstring'''
if token.startswith("<extra_id_"):
_lowerCAmelCase : str = re.match(R"<extra_id_(\d+)>", __a)
_lowerCAmelCase : Optional[int] = int(match.group(1))
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(__a)
def snake_case__ ( self, __a):
'''simple docstring'''
        if __a < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(__a)
        else:
            token = f"<extra_id_{self.vocab_size - 1 - __a}>"
        return token
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Tuple = []
_lowerCAmelCase : Any = ""
_lowerCAmelCase : List[Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__a) + token
_lowerCAmelCase : Dict = True
_lowerCAmelCase : int = []
else:
current_sub_tokens.append(__a)
_lowerCAmelCase : Dict = False
out_string += self.sp_model.decode(__a)
return out_string.strip()
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
if not os.path.isdir(__a):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
_lowerCAmelCase : List[str] = os.path.join(
__a, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(__a) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file, __a)
elif not os.path.isfile(self.vocab_file):
with open(__a, "wb") as fi:
_lowerCAmelCase : Tuple = self.sp_model.serialized_model_proto()
fi.write(__a)
return (out_vocab_file,)
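# --- Usage sketch (added for illustration; not part of the original file) ---
# Assuming the class above is transformers' T5Tokenizer: extra_ids=100 reserves
# sentinel tokens <extra_id_0> ... <extra_id_99> at the top of the id space, and
# every encoded sequence is closed with </s>. Requires the "t5-small"
# checkpoint to be downloadable.
def t5_sentinel_demo():
    from transformers import T5Tokenizer
    tok = T5Tokenizer.from_pretrained("t5-small")
    ids = tok("The <extra_id_0> walks in <extra_id_1> park").input_ids
    assert ids[-1] == tok.eos_token_id  # EOS was appended automatically
    return tok.convert_ids_to_tokens(ids)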
| 300
|
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class UpperCAmelCase_ ( datasets.BuilderConfig):
lowerCamelCase__ = None
class UpperCAmelCase_ ( datasets.ArrowBasedBuilder):
lowerCamelCase__ = PandasConfig
def snake_case__ ( self):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features)
def snake_case__ ( self, __a):
'''simple docstring'''
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
_lowerCAmelCase : str = dl_manager.download_and_extract(self.config.data_files)
if isinstance(__a, (str, list, tuple)):
_lowerCAmelCase : str = data_files
if isinstance(__a, __a):
_lowerCAmelCase : int = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase : Union[str, Any] = [dl_manager.iter_files(__a) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
_lowerCAmelCase : str = []
for split_name, files in data_files.items():
if isinstance(__a, __a):
_lowerCAmelCase : Optional[Any] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase : str = [dl_manager.iter_files(__a) for file in files]
splits.append(datasets.SplitGenerator(name=__a, gen_kwargs={"files": files}))
return splits
def snake_case__ ( self, __a):
'''simple docstring'''
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
_lowerCAmelCase : str = table_cast(__a, self.config.features.arrow_schema)
return pa_table
def snake_case__ ( self, __a):
'''simple docstring'''
for i, file in enumerate(itertools.chain.from_iterable(__a)):
with open(__a, "rb") as f:
_lowerCAmelCase : Optional[Any] = pa.Table.from_pandas(pd.read_pickle(__a))
yield i, self._cast_table(__a)
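# --- Usage sketch (added for illustration; not part of the original file) ---
# The builder above streams pickled pandas DataFrames into Arrow tables. A
# hedged example of the intended entry point, via datasets.load_dataset and the
# packaged "pandas" builder:
def load_pickled_dataframe_demo(path="demo.pkl"):
    import pandas as pd
    from datasets import load_dataset
    pd.DataFrame({"a": [1, 2], "b": ["x", "y"]}).to_pickle(path)
    return load_dataset("pandas", data_files=path, split="train")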
| 300
| 1
|
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( a):
lowerCamelCase__ = ['input_features', 'attention_mask']
def __init__( self, __a=80, __a=1_6000, __a=0.0, __a=10, __a=25, __a="hamming_window", __a=32_768.0, __a=0.97, __a=1.0, __a=True, __a=True, __a=False, **__a, ):
'''simple docstring'''
super().__init__(feature_size=__a, sampling_rate=__a, padding_value=__a, **__a)
_lowerCAmelCase : Optional[Any] = feature_size
_lowerCAmelCase : Union[str, Any] = sampling_rate
_lowerCAmelCase : List[Any] = padding_value
_lowerCAmelCase : List[Any] = hop_length
_lowerCAmelCase : Optional[Any] = win_length
_lowerCAmelCase : str = frame_signal_scale
_lowerCAmelCase : Any = preemphasis_coeff
_lowerCAmelCase : str = mel_floor
_lowerCAmelCase : Optional[int] = normalize_means
_lowerCAmelCase : List[str] = normalize_vars
_lowerCAmelCase : Any = win_function
_lowerCAmelCase : str = return_attention_mask
_lowerCAmelCase : Optional[int] = win_length * sampling_rate // 1000
_lowerCAmelCase : Dict = hop_length * sampling_rate // 1000
_lowerCAmelCase : int = optimal_fft_length(self.sample_size)
_lowerCAmelCase : Union[str, Any] = (self.n_fft // 2) + 1
def snake_case__ ( self, __a):
'''simple docstring'''
if self.win_function == "hamming_window":
_lowerCAmelCase : Union[str, Any] = window_function(window_length=self.sample_size, name=self.win_function, periodic=__a)
else:
_lowerCAmelCase : List[str] = window_function(window_length=self.sample_size, name=self.win_function)
_lowerCAmelCase : Optional[Any] = mel_filter_bank(
num_frequency_bins=self.n_freqs, num_mel_filters=self.feature_size, min_frequency=0.0, max_frequency=self.sampling_rate / 2.0, sampling_rate=self.sampling_rate, )
_lowerCAmelCase : List[str] = spectrogram(
one_waveform * self.frame_signal_scale, window=__a, frame_length=self.sample_size, hop_length=self.sample_stride, fft_length=self.n_fft, center=__a, preemphasis=self.preemphasis_coeff, mel_filters=__a, mel_floor=self.mel_floor, log_mel="log", )
return msfc_features.T
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
if self.normalize_means:
_lowerCAmelCase : Optional[int] = x[:input_length].mean(axis=0)
_lowerCAmelCase : Optional[Any] = np.subtract(__a, __a)
if self.normalize_vars:
_lowerCAmelCase : Optional[int] = x[:input_length].std(axis=0)
_lowerCAmelCase : Union[str, Any] = np.divide(__a, __a)
if input_length < x.shape[0]:
_lowerCAmelCase : Optional[int] = padding_value
# make sure array is in float32
_lowerCAmelCase : Dict = x.astype(np.floataa)
return x
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
_lowerCAmelCase : str = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(__a, __a, self.padding_value) for x, n in zip(__a, __a)]
def __call__( self, __a, __a = False, __a = None, __a = False, __a = None, __a = None, __a = None, __a = None, **__a, ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
f" {self.sampling_rate} and not {sampling_rate}.")
else:
logger.warning(
"It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug.")
_lowerCAmelCase : List[str] = isinstance(__a, np.ndarray) and len(raw_speech.shape) > 1
if is_batched_numpy and len(raw_speech.shape) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}")
_lowerCAmelCase : Optional[int] = is_batched_numpy or (
isinstance(__a, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
)
if is_batched:
_lowerCAmelCase : Union[str, Any] = [np.asarray(__a, dtype=np.floataa) for speech in raw_speech]
elif not is_batched and not isinstance(__a, np.ndarray):
_lowerCAmelCase : Union[str, Any] = np.asarray(__a, dtype=np.floataa)
elif isinstance(__a, np.ndarray) and raw_speech.dtype is np.dtype(np.floataa):
_lowerCAmelCase : List[str] = raw_speech.astype(np.floataa)
# always return batch
if not is_batched:
_lowerCAmelCase : Dict = [raw_speech]
# extract fbank features
_lowerCAmelCase : Dict = [self._extract_mfsc_features(__a) for one_waveform in raw_speech]
# convert into correct format for padding
_lowerCAmelCase : Dict = BatchFeature({"input_features": features})
_lowerCAmelCase : Any = self.pad(
__a, padding=__a, max_length=__a, truncation=__a, pad_to_multiple_of=__a, return_attention_mask=__a, **__a, )
# make sure list is in array format
_lowerCAmelCase : str = padded_inputs.get("input_features")
if isinstance(input_features[0], __a):
_lowerCAmelCase : List[Any] = [np.asarray(__a, dtype=np.floataa) for feature in input_features]
_lowerCAmelCase : Any = padded_inputs.get("attention_mask")
if attention_mask is not None:
_lowerCAmelCase : Any = [np.asarray(__a, dtype=np.intaa) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
_lowerCAmelCase : Union[str, Any] = (
np.array(__a, dtype=np.intaa)
if self._get_padding_strategies(__a, max_length=__a) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
_lowerCAmelCase : Union[str, Any] = self.normalize(
padded_inputs["input_features"], attention_mask=__a)
if return_tensors is not None:
_lowerCAmelCase : Tuple = padded_inputs.convert_to_tensors(__a)
return padded_inputs
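# --- Usage sketch (added for illustration; not part of the original file) ---
# Feeding one second of synthetic 16 kHz audio through the extractor above.
# Hedged: the class appears to be transformers' (deprecated) M-CTC-T feature
# extractor; with the defaults it emits 80 mel-frequency spectral coefficients
# per 10 ms hop, so roughly 100 frames for a one-second clip.
import numpy as np

def mfsc_demo(extractor):
    waveform = np.random.default_rng(0).standard_normal(16000).astype(np.float32)
    batch = extractor(waveform, sampling_rate=16000, padding=True, return_tensors="np")
    return batch["input_features"].shape  # roughly (1, ~100, 80)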
| 300
|
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class UpperCAmelCase_ ( a , a , unittest.TestCase):
lowerCamelCase__ = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCamelCase__ = (
{
'feature-extraction': TFMobileBertModel,
'fill-mask': TFMobileBertForMaskedLM,
'question-answering': TFMobileBertForQuestionAnswering,
'text-classification': TFMobileBertForSequenceClassification,
'token-classification': TFMobileBertForTokenClassification,
'zero-shot': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
def snake_case__ ( self, __a, __a, __a=False):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = super()._prepare_for_class(__a, __a, return_labels=__a)
if return_labels:
if model_class in get_values(__a):
_lowerCAmelCase : Tuple = tf.zeros(self.model_tester.batch_size, dtype=tf.intaa)
return inputs_dict
class UpperCAmelCase_ ( a):
def __init__( self, __a, __a=13, __a=7, __a=True, __a=True, __a=True, __a=True, __a=99, __a=32, __a=32, __a=2, __a=4, __a=37, __a="gelu", __a=0.1, __a=0.1, __a=512, __a=16, __a=2, __a=0.02, __a=3, __a=4, __a=None, ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = parent
_lowerCAmelCase : Dict = batch_size
_lowerCAmelCase : str = seq_length
_lowerCAmelCase : int = is_training
_lowerCAmelCase : List[Any] = use_input_mask
_lowerCAmelCase : Optional[Any] = use_token_type_ids
_lowerCAmelCase : Union[str, Any] = use_labels
_lowerCAmelCase : int = vocab_size
_lowerCAmelCase : int = hidden_size
_lowerCAmelCase : Optional[int] = num_hidden_layers
_lowerCAmelCase : Tuple = num_attention_heads
_lowerCAmelCase : Dict = intermediate_size
_lowerCAmelCase : Tuple = hidden_act
_lowerCAmelCase : Any = hidden_dropout_prob
_lowerCAmelCase : Any = attention_probs_dropout_prob
_lowerCAmelCase : List[Any] = max_position_embeddings
_lowerCAmelCase : Any = type_vocab_size
_lowerCAmelCase : List[Any] = type_sequence_label_size
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : List[str] = num_labels
_lowerCAmelCase : List[Any] = num_choices
_lowerCAmelCase : str = scope
_lowerCAmelCase : Union[str, Any] = embedding_size
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
_lowerCAmelCase : str = None
if self.use_input_mask:
_lowerCAmelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length])
_lowerCAmelCase : List[str] = None
if self.use_token_type_ids:
_lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : Optional[int] = None
if self.use_labels:
_lowerCAmelCase : int = ids_tensor([self.batch_size], self.type_sequence_label_size)
_lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
_lowerCAmelCase : str = ids_tensor([self.batch_size], self.num_choices)
_lowerCAmelCase : Optional[Any] = MobileBertConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, embedding_size=self.embedding_size, )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : str = TFMobileBertModel(config=__a)
_lowerCAmelCase : List[str] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : Any = model(__a)
_lowerCAmelCase : Optional[Any] = [input_ids, input_mask]
_lowerCAmelCase : List[Any] = model(__a)
_lowerCAmelCase : Any = model(__a)
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : int = TFMobileBertForMaskedLM(config=__a)
_lowerCAmelCase : List[str] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : List[Any] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : str = TFMobileBertForNextSentencePrediction(config=__a)
_lowerCAmelCase : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : List[str] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = TFMobileBertForPreTraining(config=__a)
_lowerCAmelCase : Any = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : Optional[Any] = model(__a)
self.parent.assertEqual(
result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Dict = self.num_labels
_lowerCAmelCase : Optional[Any] = TFMobileBertForSequenceClassification(config=__a)
_lowerCAmelCase : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : Optional[Any] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.num_choices
_lowerCAmelCase : List[Any] = TFMobileBertForMultipleChoice(config=__a)
_lowerCAmelCase : Dict = tf.tile(tf.expand_dims(__a, 1), (1, self.num_choices, 1))
_lowerCAmelCase : List[str] = tf.tile(tf.expand_dims(__a, 1), (1, self.num_choices, 1))
_lowerCAmelCase : Optional[int] = tf.tile(tf.expand_dims(__a, 1), (1, self.num_choices, 1))
_lowerCAmelCase : Optional[Any] = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
_lowerCAmelCase : List[str] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.num_labels
_lowerCAmelCase : Union[str, Any] = TFMobileBertForTokenClassification(config=__a)
_lowerCAmelCase : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : Union[str, Any] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : int = TFMobileBertForQuestionAnswering(config=__a)
_lowerCAmelCase : Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : Union[str, Any] = model(__a)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
(
(
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) ,
) : Union[str, Any] = config_and_inputs
_lowerCAmelCase : List[str] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = TFMobileBertModelTest.TFMobileBertModelTester(self)
_lowerCAmelCase : List[Any] = ConfigTester(self, config_class=__a, hidden_size=37)
def snake_case__ ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*__a)
@slow
def snake_case__ ( self):
'''simple docstring'''
for model_name in ["google/mobilebert-uncased"]:
_lowerCAmelCase : List[Any] = TFMobileBertModel.from_pretrained(__a)
self.assertIsNotNone(__a)
@require_tf
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
_lowerCAmelCase : Any = tf.constant([[0, 1, 2, 3, 4, 5]])
_lowerCAmelCase : Tuple = model(__a)[0]
_lowerCAmelCase : Union[str, Any] = [1, 6, 3_0522]
self.assertEqual(output.shape, __a)
_lowerCAmelCase : Tuple = tf.constant(
[
[
[-4.5_919_547, -9.248_295, -9.645_256],
[-6.7_306_175, -6.440_284, -6.6_052_837],
[-7.2_743_506, -6.7_847_915, -6.024_673],
]
])
tf.debugging.assert_near(output[:, :3, :3], __a, atol=1E-4)
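# --- Usage sketch (added for illustration; not part of the original file) ---
# A standalone version of the shape check in the integration test above; the
# checkpoint name and expected vocab size (30522) are taken from the test itself.
def mobilebert_shape_demo():
    import tensorflow as tf
    from transformers import TFMobileBertForPreTraining
    model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
    output = model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]
    return output.shape  # expected TensorShape([1, 6, 30522])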
| 300
| 1
|
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
_snake_case = logging.get_logger(__name__)
def list_field( default=None , metadata=None ):
    '''simple docstring'''
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class UpperCAmelCase_ :
lowerCamelCase__ = list_field(
default=[] , metadata={
'help': (
'Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version'
' of all available models'
)
} , )
lowerCamelCase__ = list_field(
default=[8] , metadata={'help': 'List of batch sizes for which memory and time performance will be evaluated'})
lowerCamelCase__ = list_field(
default=[8, 32, 128, 512] , metadata={'help': 'List of sequence lengths for which memory and time performance will be evaluated'} , )
lowerCamelCase__ = field(
default=a , metadata={'help': 'Whether to benchmark inference of model. Inference can be disabled via --no-inference.'} , )
lowerCamelCase__ = field(
default=a , metadata={'help': 'Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'} , )
lowerCamelCase__ = field(
default=a , metadata={'help': 'Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'})
lowerCamelCase__ = field(default=a , metadata={'help': 'Use FP16 to accelerate inference.'})
lowerCamelCase__ = field(default=a , metadata={'help': 'Benchmark training of model'})
lowerCamelCase__ = field(default=a , metadata={'help': 'Verbose memory tracing'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'} , )
lowerCamelCase__ = field(
default=a , metadata={
'help': 'Whether to perform memory measurements. Memory measurements can be disabled via --no-memory'
} , )
lowerCamelCase__ = field(default=a , metadata={'help': 'Trace memory line by line'})
lowerCamelCase__ = field(default=a , metadata={'help': 'Save result to a CSV file'})
lowerCamelCase__ = field(default=a , metadata={'help': 'Save all print statements in a log file'})
lowerCamelCase__ = field(default=a , metadata={'help': 'Whether to print environment information'})
lowerCamelCase__ = field(
default=a , metadata={
'help': (
'Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use'
' multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled'
' for debugging / testing and on TPU.'
)
} , )
lowerCamelCase__ = field(
default=F"inference_time_{round(time())}.csv" , metadata={'help': 'CSV filename used if saving time results to csv.'} , )
lowerCamelCase__ = field(
default=F"inference_memory_{round(time())}.csv" , metadata={'help': 'CSV filename used if saving memory results to csv.'} , )
lowerCamelCase__ = field(
default=F"train_time_{round(time())}.csv" , metadata={'help': 'CSV filename used if saving time results to csv for training.'} , )
lowerCamelCase__ = field(
default=F"train_memory_{round(time())}.csv" , metadata={'help': 'CSV filename used if saving memory results to csv for training.'} , )
lowerCamelCase__ = field(
default=F"env_info_{round(time())}.csv" , metadata={'help': 'CSV filename used if saving environment information.'} , )
lowerCamelCase__ = field(
default=F"log_{round(time())}.csv" , metadata={'help': 'Log filename used if print statements are saved in log.'} , )
lowerCamelCase__ = field(default=3 , metadata={'help': 'Times an experiment will be run.'})
lowerCamelCase__ = field(
default=a , metadata={
'help': (
'Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain'
' model weights.'
)
} , )
def snake_case__ ( self):
'''simple docstring'''
warnings.warn(
f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
" are deprecated in general and it is advised to use external Benchmarking libraries "
" to benchmark Transformer models.", __a, )
def snake_case__ ( self):
'''simple docstring'''
return json.dumps(dataclasses.asdict(self), indent=2)
@property
def snake_case__ ( self):
'''simple docstring'''
if len(self.models) <= 0:
raise ValueError(
"Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
" bert-base-cased` or `args.models = ['bert-base-cased'].")
return self.models
@property
def snake_case__ ( self):
'''simple docstring'''
if not self.multi_process:
return False
elif self.is_tpu:
logger.info("Multiprocessing is currently not possible on TPU.")
return False
else:
return True
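# --- Usage sketch (added for illustration; not part of the original file) ---
# The dataclass above is meant to be filled from the command line. A hedged
# example with HfArgumentParser and the PyTorch subclass of these benchmark
# arguments (both upstream transformers names; treat the import path as an
# assumption):
def parse_benchmark_args(argv):
    from transformers import HfArgumentParser
    from transformers.benchmark.benchmark_args import PyTorchBenchmarkArguments
    (args,) = HfArgumentParser(PyTorchBenchmarkArguments).parse_args_into_dataclasses(argv)
    return args.models, args.batch_sizes, args.sequence_lengths

# e.g. parse_benchmark_args(["--models", "bert-base-cased", "--batch_sizes", "8"])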
| 300
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
_snake_case = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(a)
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'rag'
lowerCamelCase__ = True
def __init__( self, __a=None, __a=True, __a=None, __a=None, __a=None, __a=None, __a=None, __a=" / ", __a=" // ", __a=5, __a=300, __a=768, __a=8, __a="wiki_dpr", __a="train", __a="compressed", __a=None, __a=None, __a=False, __a=False, __a=0.0, __a=True, __a=False, __a=False, __a=False, __a=True, __a=None, **__a, ):
'''simple docstring'''
super().__init__(
bos_token_id=__a, pad_token_id=__a, eos_token_id=__a, decoder_start_token_id=__a, forced_eos_token_id=__a, is_encoder_decoder=__a, prefix=__a, vocab_size=__a, **__a, )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
_lowerCAmelCase : List[str] = kwargs.pop("question_encoder")
_lowerCAmelCase : Union[str, Any] = question_encoder_config.pop("model_type")
_lowerCAmelCase : int = kwargs.pop("generator")
_lowerCAmelCase : Optional[Any] = decoder_config.pop("model_type")
from ..auto.configuration_auto import AutoConfig
_lowerCAmelCase : int = AutoConfig.for_model(__a, **__a)
_lowerCAmelCase : Tuple = AutoConfig.for_model(__a, **__a)
_lowerCAmelCase : List[Any] = reduce_loss
_lowerCAmelCase : Any = label_smoothing
_lowerCAmelCase : Optional[int] = exclude_bos_score
_lowerCAmelCase : Optional[Any] = do_marginalize
_lowerCAmelCase : Any = title_sep
_lowerCAmelCase : Any = doc_sep
_lowerCAmelCase : Optional[int] = n_docs
_lowerCAmelCase : Optional[Any] = max_combined_length
_lowerCAmelCase : List[str] = dataset
_lowerCAmelCase : List[str] = dataset_split
_lowerCAmelCase : Optional[Any] = index_name
_lowerCAmelCase : Dict = retrieval_vector_size
_lowerCAmelCase : Union[str, Any] = retrieval_batch_size
_lowerCAmelCase : Optional[int] = passages_path
_lowerCAmelCase : Dict = index_path
_lowerCAmelCase : Tuple = use_dummy_dataset
_lowerCAmelCase : Union[str, Any] = output_retrieved
_lowerCAmelCase : str = do_deduplication
_lowerCAmelCase : Union[str, Any] = use_cache
if self.forced_eos_token_id is None:
_lowerCAmelCase : Tuple = getattr(self.generator, "forced_eos_token_id", __a)
@classmethod
def snake_case__ ( cls, __a, __a, **__a):
'''simple docstring'''
return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = copy.deepcopy(self.__dict__)
_lowerCAmelCase : Union[str, Any] = self.question_encoder.to_dict()
_lowerCAmelCase : Any = self.generator.to_dict()
_lowerCAmelCase : Optional[Any] = self.__class__.model_type
return output
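# --- Usage sketch (added for illustration; not part of the original file) ---
# RagConfig composes a question-encoder config and a generator config. A hedged
# example of building one from two sub-configs (the classmethod above is
# from_question_encoder_generator_configs upstream):
def build_rag_config():
    from transformers import BartConfig, DPRConfig, RagConfig
    return RagConfig.from_question_encoder_generator_configs(
        DPRConfig(), BartConfig(), n_docs=5, index_name="compressed"
    )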
| 300
| 1
|
import requests
_snake_case = "" # <-- Put your OpenWeatherMap appid here!
_snake_case = "https://api.openweathermap.org/data/2.5/"
def A ( _lowerCamelCase = "Chicago" , _lowerCamelCase = APPID ):
'''simple docstring'''
return requests.get(URL_BASE + "weather" , params=locals() ).json()
def A ( _lowerCamelCase = "Kolkata, India" , _lowerCamelCase = APPID ):
'''simple docstring'''
return requests.get(URL_BASE + "forecast" , params=locals() ).json()
def A ( _lowerCamelCase = 55.68 , _lowerCamelCase = 12.57 , _lowerCamelCase = APPID ):
'''simple docstring'''
return requests.get(URL_BASE + "onecall" , params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
_snake_case = input("Enter a location:").strip()
if location:
pprint(current_weather(location))
else:
break
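# --- Usage sketch (added for illustration; not part of the original file) ---
# Pulling one field out of the OpenWeatherMap response. The "main"/"temp" keys
# follow the public API documentation (temperature in Kelvin by default), and a
# valid appid is required for the request to succeed.
def chicago_temperature(appid):
    return current_weather("Chicago", appid)["main"]["temp"]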
| 300
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class UpperCAmelCase_ ( a):
@staticmethod
@abstractmethod
def snake_case__ ( __a):
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def snake_case__ ( self):
'''simple docstring'''
raise NotImplementedError()
| 300
| 1
|
from functools import lru_cache
def unique_prime_factors( n ):
    '''simple docstring'''
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i )
    if n > 1:
        factors.add(n )
    return factors
@lru_cache
def upf_len( num ):
    '''simple docstring'''
    return len(unique_prime_factors(num ) )
def equality( iterable ):
    '''simple docstring'''
    return len(set(iterable ) ) in (0, 1)
def run( n ):
    '''simple docstring'''
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n )]
        # Run elements through out unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x ) for x in group]
        checker.append(n )
        # If all numbers in the list are equal, return the group variable.
        if equality(checker ):
            return group
        # Increment our base variable by 1
        base += 1
def solution( n = 4 ):
    '''simple docstring'''
    results = run(n )
    return results[0] if len(results ) else None
if __name__ == "__main__":
print(solution())
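# --- Verification sketch (added for illustration; not part of the original file) ---
# Project Euler 47's own examples: 14, 15 are the first two consecutive integers
# with two distinct prime factors each, and 644, 645, 646 the first three with
# three; solution() returns the first member of the run it finds.
def euler47_examples() -> bool:
    return solution(2) == 14 and solution(3) == 644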
| 300
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def create_rename_keys( config , base_model=False ):
    '''simple docstring'''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v( state_dict , config , base_model=False ):
    '''simple docstring'''
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"blocks.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(F"blocks.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_( state_dict ):
    '''simple docstring'''
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct , old , new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def prepare_img( ):
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint( vit_name , pytorch_dump_folder_path ):
    '''simple docstring'''
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10] )
        config.image_size = int(vit_name[-9:-6] )
    else:
        config.num_labels = 1_000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4] )
        config.image_size = int(vit_name[-3:] )
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny" ):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small" ):
            config.hidden_size = 384
            config.intermediate_size = 1_536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small" ):
            config.hidden_size = 768
            config.intermediate_size = 2_304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base" ):
            pass
        elif vit_name[4:].startswith("large" ):
            config.hidden_size = 1_024
            config.intermediate_size = 4_096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge" ):
            config.hidden_size = 1_280
            config.intermediate_size = 5_120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(vit_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config , base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config ).eval()
    else:
        model = ViTForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size )
    else:
        image_processor = ViTImageProcessor(size=config.image_size )
    encoding = image_processor(images=prepare_img() , return_tensors="pt" )
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values )
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output , outputs.pooler_output , atol=1e-3 )
    else:
        timm_logits = timm_model(pixel_values )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits , outputs.logits , atol=1e-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"Saving model {vit_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_patch16_224",
type=str,
help="Name of the ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
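# A minimal usage sketch (assuming this script is saved as
# convert_vit_timm_to_pytorch.py; the checkpoint name is just an example, and
# any timm ViT/DeiT model that follows the naming scheme parsed above works):
#
#   python convert_vit_timm_to_pytorch.py \
#       --vit_name vit_base_patch16_224 \
#       --pytorch_dump_folder_path ./vit-base-patch16-224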
| 300
| 1
|
def jaro_winkler(str1: str, str2: str) -> float:
    """Compute the Jaro-Winkler similarity of two strings (1.0 means identical)."""

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("hello", "world"))
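# Hand-checked example (the classic Winkler pair): jaro_winkler("martha", "marhta")
# has 6 matching characters, 1 transposition and a common prefix of 3, so the
# score is 0.9444... + 0.1 * 3 * (1 - 0.9444...) ≈ 0.9611.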
| 300
|
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
logger = logging.get_logger(__name__)
class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
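# After this shim, legacy imports keep working but emit a FutureWarning, e.g.:
#   extractor = VideoMAEFeatureExtractor()  # behaves exactly like VideoMAEImageProcessor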
| 300
| 1
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
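# Illustrative sketch of aligning the template with a concrete dataset schema
# (column names are the defaults defined above):
#
#   from datasets import Audio, ClassLabel, Features
#   features = Features({"audio": Audio(), "labels": ClassLabel(names=["cat", "dog"])})
#   task = AudioClassification(audio_column="audio", label_column="labels")
#   task = task.align_with_features(features)  # copies the dataset's ClassLabel into label_schema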
| 300
|
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase):
@property
def snake_case__ ( self):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = ort.SessionOptions()
_lowerCAmelCase : int = False
return options
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png")
_lowerCAmelCase : List[str] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png")
_lowerCAmelCase : List[str] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy")
# using the PNDM scheduler by default
_lowerCAmelCase : Optional[int] = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
"CompVis/stable-diffusion-v1-4", revision="onnx", safety_checker=__a, feature_extractor=__a, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=__a)
_lowerCAmelCase : Any = "A red cat sitting on a park bench"
_lowerCAmelCase : Optional[Any] = np.random.RandomState(0)
_lowerCAmelCase : Any = pipe(
prompt=__a, image=__a, mask_image=__a, strength=0.75, guidance_scale=7.5, num_inference_steps=15, generator=__a, output_type="np", )
_lowerCAmelCase : Optional[int] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 1E-2
| 300
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
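# Minimal usage sketch (quant_mode=True switches I-BERT to integer-only mode;
# the model class is shown for illustration):
#
#   from transformers import IBertConfig, IBertModel
#   configuration = IBertConfig(quant_mode=True)
#   model = IBertModel(configuration)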
| 300
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = tempfile.mkdtemp()
# fmt: off
_lowerCAmelCase : Optional[Any] = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
_lowerCAmelCase : Optional[Any] = dict(zip(__a, range(len(__a))))
_lowerCAmelCase : int = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
_lowerCAmelCase : Optional[Any] = {"unk_token": "<unk>"}
_lowerCAmelCase : Any = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
_lowerCAmelCase : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file, "w", encoding="utf-8") as fp:
fp.write(json.dumps(__a) + "\n")
with open(self.merges_file, "w", encoding="utf-8") as fp:
fp.write("\n".join(__a))
_lowerCAmelCase : List[str] = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
_lowerCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname, __a)
with open(self.image_processor_file, "w", encoding="utf-8") as fp:
json.dump(__a, __a)
def snake_case__ ( self, **__a):
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname, **__a)
def snake_case__ ( self, **__a):
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **__a)
def snake_case__ ( self, **__a):
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname, **__a)
def snake_case__ ( self):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta)]
_lowerCAmelCase : Optional[int] = [Image.fromarray(np.moveaxis(__a, 0, -1)) for x in image_inputs]
return image_inputs
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.get_tokenizer()
_lowerCAmelCase : Optional[int] = self.get_rust_tokenizer()
_lowerCAmelCase : Dict = self.get_image_processor()
_lowerCAmelCase : Any = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
processor_slow.save_pretrained(self.tmpdirname)
_lowerCAmelCase : Tuple = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=__a)
_lowerCAmelCase : str = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
processor_fast.save_pretrained(self.tmpdirname)
_lowerCAmelCase : Any = CLIPSegProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer, __a)
self.assertIsInstance(processor_fast.tokenizer, __a)
self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor, __a)
self.assertIsInstance(processor_fast.image_processor, __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
_lowerCAmelCase : Any = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
_lowerCAmelCase : Tuple = self.get_image_processor(do_normalize=__a, padding_value=1.0)
_lowerCAmelCase : Union[str, Any] = CLIPSegProcessor.from_pretrained(
self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=__a, padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer, __a)
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor, __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.get_image_processor()
_lowerCAmelCase : Dict = self.get_tokenizer()
_lowerCAmelCase : Union[str, Any] = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : List[str] = self.prepare_image_inputs()
_lowerCAmelCase : List[str] = image_processor(__a, return_tensors="np")
_lowerCAmelCase : Optional[Any] = processor(images=__a, return_tensors="np")
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.get_image_processor()
_lowerCAmelCase : Tuple = self.get_tokenizer()
_lowerCAmelCase : Dict = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : Union[str, Any] = "lower newer"
_lowerCAmelCase : List[str] = processor(text=__a)
_lowerCAmelCase : List[Any] = tokenizer(__a)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.get_image_processor()
_lowerCAmelCase : Any = self.get_tokenizer()
_lowerCAmelCase : Dict = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : int = "lower newer"
_lowerCAmelCase : List[Any] = self.prepare_image_inputs()
_lowerCAmelCase : Any = processor(text=__a, images=__a)
self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])
# test if it raises when no input is passed
with pytest.raises(__a):
processor()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.get_image_processor()
_lowerCAmelCase : int = self.get_tokenizer()
_lowerCAmelCase : Any = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : Dict = self.prepare_image_inputs()
_lowerCAmelCase : Optional[Any] = self.prepare_image_inputs()
_lowerCAmelCase : Any = processor(images=__a, visual_prompt=__a)
self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])
# test if it raises when no input is passed
with pytest.raises(__a):
processor()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = self.get_image_processor()
_lowerCAmelCase : Any = self.get_tokenizer()
_lowerCAmelCase : Any = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCAmelCase : List[str] = processor.batch_decode(__a)
_lowerCAmelCase : List[Any] = tokenizer.batch_decode(__a)
self.assertListEqual(__a, __a)
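# Outside the test harness the processor is used like CLIPProcessor; a sketch
# (the checkpoint id below is illustrative):
#
#   processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#   inputs = processor(text=["a cat"], images=image, return_tensors="pt")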
| 300
| 1
|
from ....utils import logging
logger = logging.get_logger(__name__)
class MMBTConfig:
    """Configuration class to store the configuration of a Multimodal Bitransformer (MMBT) model."""

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 300
|
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_snake_case = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class UpperCAmelCase_ ( unittest.TestCase):
lowerCamelCase__ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
lowerCamelCase__ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
lowerCamelCase__ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
lowerCamelCase__ = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
    def get_test_pipeline(self, model, tokenizer, processor):
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=["polics", "health"]
        )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[Any] = classifier("Who are you voting for in 2020?", candidate_labels="politics")
self.assertEqual(__a, {"sequence": ANY(__a), "labels": [ANY(__a)], "scores": [ANY(__a)]})
# No kwarg
_lowerCAmelCase : int = classifier("Who are you voting for in 2020?", ["politics"])
self.assertEqual(__a, {"sequence": ANY(__a), "labels": [ANY(__a)], "scores": [ANY(__a)]})
_lowerCAmelCase : Tuple = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
self.assertEqual(__a, {"sequence": ANY(__a), "labels": [ANY(__a)], "scores": [ANY(__a)]})
_lowerCAmelCase : List[Any] = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
self.assertEqual(
__a, {"sequence": ANY(__a), "labels": [ANY(__a), ANY(__a)], "scores": [ANY(__a), ANY(__a)]})
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)
_lowerCAmelCase : List[str] = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
self.assertEqual(
__a, {"sequence": ANY(__a), "labels": [ANY(__a), ANY(__a)], "scores": [ANY(__a), ANY(__a)]})
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)
_lowerCAmelCase : List[Any] = classifier(
"Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}")
self.assertEqual(__a, {"sequence": ANY(__a), "labels": [ANY(__a)], "scores": [ANY(__a)]})
# https://github.com/huggingface/transformers/issues/13846
_lowerCAmelCase : Optional[int] = classifier(["I am happy"], ["positive", "negative"])
self.assertEqual(
__a, [
{"sequence": ANY(__a), "labels": [ANY(__a), ANY(__a)], "scores": [ANY(__a), ANY(__a)]}
for i in range(1)
], )
_lowerCAmelCase : Any = classifier(["I am happy", "I am sad"], ["positive", "negative"])
self.assertEqual(
__a, [
{"sequence": ANY(__a), "labels": [ANY(__a), ANY(__a)], "scores": [ANY(__a), ANY(__a)]}
for i in range(2)
], )
with self.assertRaises(__a):
classifier("", candidate_labels="politics")
with self.assertRaises(__a):
classifier(__a, candidate_labels="politics")
with self.assertRaises(__a):
classifier("Who are you voting for in 2020?", candidate_labels="")
with self.assertRaises(__a):
classifier("Who are you voting for in 2020?", candidate_labels=__a)
with self.assertRaises(__a):
classifier(
"Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="Not formatting template", )
with self.assertRaises(__a):
classifier(
"Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template=__a, )
self.run_entailment_id(__a)
    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)
@require_torch
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = pipeline(
"zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt", )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"Who are you voting for in 2020?" * 100, candidate_labels=["politics", "public health", "science"])
@require_torch
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = pipeline(
"zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt", )
_lowerCAmelCase : List[Any] = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
self.assertEqual(
nested_simplify(__a), {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.333, 0.333, 0.333],
}, )
@require_tf
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = pipeline(
"zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="tf", )
_lowerCAmelCase : Union[str, Any] = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
self.assertEqual(
nested_simplify(__a), {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.333, 0.333, 0.333],
}, )
@slow
@require_torch
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
_lowerCAmelCase : Optional[Any] = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
self.assertEqual(
nested_simplify(__a), {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.976, 0.015, 0.009],
}, )
_lowerCAmelCase : Union[str, Any] = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data.", candidate_labels=["machine learning", "statistics", "translation", "vision"], multi_label=__a, )
self.assertEqual(
nested_simplify(__a), {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.817, 0.713, 0.018, 0.018],
}, )
@slow
@require_tf
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
_lowerCAmelCase : Dict = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
self.assertEqual(
nested_simplify(__a), {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.976, 0.015, 0.009],
}, )
_lowerCAmelCase : str = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data.", candidate_labels=["machine learning", "statistics", "translation", "vision"], multi_label=__a, )
self.assertEqual(
nested_simplify(__a), {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.817, 0.713, 0.018, 0.018],
}, )
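# The pipelines exercised above are constructed the same way in user code, e.g.:
#
#   classifier = pipeline("zero-shot-classification", model="roberta-large-mnli")
#   classifier(
#       "Who are you voting for in 2020?",
#       candidate_labels=["politics", "public health", "science"],
#   )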
| 300
| 1
|
def catalan_numbers(upper_limit: int) -> "list[int]":
    """Return the Catalan numbers C(0) .. C(upper_limit) via dynamic programming."""
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list
if __name__ == "__main__":
print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
print("\n*** Enter -1 at any time to quit ***")
print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
try:
while True:
        N = int(input().strip())
if N < 0:
print("\n********* Goodbye!! ************")
break
else:
print(f'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print("Try another upper limit for the sequence: ", end="")
except (NameError, ValueError):
print("\n********* Invalid input, goodbye! ************\n")
import doctest
doctest.testmod()
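# Quick sanity check of the recurrence:
#   >>> catalan_numbers(5)
#   [1, 1, 2, 5, 14, 42]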
| 300
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """
    Output class for the ScoreSdeVeScheduler's step function output.
    """

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """
    The variance exploding stochastic differential equation (SDE) scheduler.
    """

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        # the VE-SDE scheduler does not rescale the denoising model input
        return sample

    def set_timesteps(
        self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None
    ):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(
        self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None
    ):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
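# A rough predictor-corrector sampling sketch, assuming this matches diffusers'
# ScoreSdeVeScheduler API (`model` is a score network whose output has `.sample`):
#
#   scheduler.set_timesteps(num_inference_steps)
#   scheduler.set_sigmas(num_inference_steps)
#   for t in scheduler.timesteps:
#       for _ in range(scheduler.config.correct_steps):
#           model_output = model(sample, t).sample
#           sample = scheduler.step_correct(model_output, sample).prev_sample
#       model_output = model(sample, t).sample
#       sample = scheduler.step_pred(model_output, t, sample).prev_sample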
| 300
| 1
|
import random
def partition(a: list, left_index: int, right_index: int) -> int:
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a: list, left: int, right: int) -> None:
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right
        )  # recursive quicksort to the right of the pivot point


def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]

    quick_sort_random(arr, 0, len(arr))

    print(arr)
if __name__ == "__main__":
main()
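# Example (sorts in place; the `right` bound is exclusive):
#   >>> data = [5, 1, 4, 2, 3]
#   >>> quick_sort_random(data, 0, len(data))
#   >>> data
#   [1, 2, 3, 4, 5]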
| 300
|
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)

    return upper and lower and num and spec_char


# Passwords should contain UPPERCASE, lowercase
# numbers, and special characters
def main() -> None:
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input(
        "Please indicate the characters that must be in your password: "
    ).strip()
    print("Password generated:", password_generator(length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, length),
    )
    print("[If you are thinking of using this password, You better save it.]")
if __name__ == "__main__":
main()
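# Example session (output is random by construction):
#   >>> pw = password_generator(12)
#   >>> len(pw)
#   12
#   >>> is_strong_password(pw)  # usually True for the mixed character set used
#   True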
| 300
| 1
|
def gray_code(bit_count: int) -> list:
    """Return the Gray code sequence for `bit_count` bits as integers."""
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)

    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    """Recursively build the Gray code sequence as binary strings."""
    # base cases for 0 and 1 bits
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        generated_no = "0" + smaller_sequence[i]
        sequence.append(generated_no)

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        generated_no = "1" + smaller_sequence[i]
        sequence.append(generated_no)

    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
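# Example: each neighbouring value differs from the previous one in exactly one bit.
#   >>> gray_code(2)
#   [0, 1, 3, 2]
#   >>> gray_code(3)
#   [0, 1, 3, 2, 6, 7, 5, 4]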
| 300
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["ConvNextFeatureExtractor"]
_snake_case = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convnext"] = [
"CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvNextForImageClassification",
"ConvNextModel",
"ConvNextPreTrainedModel",
"ConvNextBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convnext"] = [
"TFConvNextForImageClassification",
"TFConvNextModel",
"TFConvNextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 300
| 1
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)
_snake_case = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"
),
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt",
},
"tokenizer_file": {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"
),
"google/realm-orqa-nq-openqa": (
"https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"
),
"google/realm-orqa-nq-reader": (
"https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"
),
"google/realm-orqa-wq-openqa": (
"https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"
),
"google/realm-orqa-wq-reader": (
"https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/realm-cc-news-pretrained-embedder": 512,
"google/realm-cc-news-pretrained-encoder": 512,
"google/realm-cc-news-pretrained-scorer": 512,
"google/realm-cc-news-pretrained-openqa": 512,
"google/realm-orqa-nq-openqa": 512,
"google/realm-orqa-nq-reader": 512,
"google/realm-orqa-wq-openqa": 512,
"google/realm-orqa-wq-reader": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"google/realm-cc-news-pretrained-embedder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-encoder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-scorer": {"do_lower_case": True},
"google/realm-cc-news-pretrained-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-reader": {"do_lower_case": True},
"google/realm-orqa-wq-openqa": {"do_lower_case": True},
"google/realm-orqa-wq-reader": {"do_lower_case": True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def batch_encode_candidates(self, text, **kwargs):
        """Encode a batch of texts (or text pairs), always padding to max_length so candidates stack."""
        # Always using a fixed sequence length to encode in order to stack candidates into a batch.
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH

        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)

        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")

            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)

        output_data = {key: item for key, item in output_data.items() if len(item) != 0}

        return BatchEncoding(output_data, tensor_type=return_tensors)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
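# Illustrative use of the candidate-encoding override above (a batch of one
# question with two candidate strings, padded to max_length):
#
#   tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
#   batch = tokenizer.batch_encode_candidates(
#       [["Hello world!", "Nice to meet you!"]], max_length=10, return_tensors="pt"
#   )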
| 300
|
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s

SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
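# Example: pass the unknown quantity as 0 and the function solves for it.
#   casimir_force(force=0, area=4, distance=0.03)           # -> {"force": ...}
#   casimir_force(force=2635e-13, area=0.0023, distance=0)  # -> {"distance": ...}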
| 300
| 1
|
def longest_distance(graph):
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1

            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1

            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))
# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
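# For the adjacency list above the longest path touches 5 vertices
# (e.g. 0 -> 2 -> 5 -> 6 -> 7), so this prints 5.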
| 300
|
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
from torch.cuda.amp import autocast
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config we are going to pretrain.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    verbose_logging: Optional[bool] = field(
        default=False,
        metadata={"help": "Whether to log verbose messages or not."},
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."}
    )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."}
    )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."}
    )
def configure_logger(model_args: ModelArguments, training_args: TrainingArguments):
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: str = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file",
        metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}
    )
@dataclass
class UpperCAmelCase_ :
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = "longest"
lowerCamelCase__ = None
lowerCamelCase__ = None
def __call__( self, __a):
'''simple docstring'''
_lowerCAmelCase : Any = self.feature_extractor.pad(
__a, max_length=self.max_length, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", )
_lowerCAmelCase : Tuple = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
_lowerCAmelCase : Optional[Any] = batch["input_values"].shape[0]
# make sure that no loss is computed on padded inputs
if batch["attention_mask"] is not None:
# compute real output lengths according to convolution formula
_lowerCAmelCase : List[str] = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
torch.long)
_lowerCAmelCase : Dict = torch.zeros(
(batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device)
# these two operations make sure that all values
# before the output length indices are attended to
_lowerCAmelCase : List[str] = 1
_lowerCAmelCase : Union[str, Any] = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
# sample randomly masked indices
_lowerCAmelCase : Optional[Any] = _compute_mask_indices(
(batch_size, mask_indices_seq_length), self.model.config.mask_time_prob, self.model.config.mask_time_length, attention_mask=__a, min_masks=2, )
return batch
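# Illustration (added; not part of the original script): `_get_feat_extract_output_lengths`
# maps raw sample counts to feature-encoder frame counts by folding the 1-D conv
# output-length formula over every layer. A minimal sketch, assuming the default
# Wav2Vec2 kernels and strides:
def _illustrative_feat_extract_output_length(
    input_length, kernels=(10, 3, 3, 3, 3, 2, 2), strides=(5, 2, 2, 2, 2, 2, 2)
):
    for kernel, stride in zip(kernels, strides):
        input_length = (input_length - kernel) // stride + 1  # standard conv output length
    return input_length
# e.g. one second of 16 kHz audio: _illustrative_feat_extract_output_length(16_000) == 49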
class UpperCAmelCase_ ( a):
def __init__( self, *__a, __a=1, __a=0, __a=1.0, **__a):
'''simple docstring'''
super().__init__(*__a, **__a)
_lowerCAmelCase : Dict = 0
_lowerCAmelCase : List[str] = max_gumbel_temp
_lowerCAmelCase : List[Any] = min_gumbel_temp
_lowerCAmelCase : int = gumbel_temp_decay
def snake_case__ ( self, __a, __a):
'''simple docstring'''
model.train()
_lowerCAmelCase : str = self._prepare_inputs(__a)
if self.use_amp:
with autocast():
_lowerCAmelCase : Any = self.compute_loss(__a, __a)
else:
_lowerCAmelCase : Dict = self.compute_loss(__a, __a)
if self.args.n_gpu > 1 or self.deepspeed:
if model.module.config.ctc_loss_reduction == "mean":
_lowerCAmelCase : List[str] = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
_lowerCAmelCase : Union[str, Any] = loss.sum() / (inputs["mask_time_indices"]).sum()
else:
raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")
if self.args.gradient_accumulation_steps > 1:
_lowerCAmelCase : List[str] = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(__a).backward()
elif self.use_apex:
with amp.scale_loss(__a, self.optimizer) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(__a)
else:
loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))
return loss.detach()
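# Illustration (added; not part of the original script): the gumbel temperature
# schedule applied above is an exponential decay clamped at a floor. With the
# ModelArguments defaults (max 2.0, min 0.5, decay 0.999995):
def _illustrative_gumbel_temperature(step, max_temp=2.0, min_temp=0.5, decay=0.999995):
    return max(max_temp * decay**step, min_temp)  # decays towards min_temp, then stays there
# _illustrative_gumbel_temperature(0) == 2.0; the 0.5 floor is reached around step 277_000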
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = parser.parse_args_into_dataclasses()
configure_logger(_lowerCamelCase , _lowerCamelCase )
# Downloading and loading a dataset from the hub.
_lowerCAmelCase : List[Any] = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
if "validation" not in datasets.keys():
# make sure only "validation" and "train" keys remain"
_lowerCAmelCase : int = DatasetDict()
_lowerCAmelCase : Optional[int] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]" , cache_dir=model_args.cache_dir , )
_lowerCAmelCase : List[str] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]" , cache_dir=model_args.cache_dir , )
else:
# make sure only "validation" and "train" keys remain"
_lowerCAmelCase : List[str] = DatasetDict()
_lowerCAmelCase : List[Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split="validation" , cache_dir=model_args.cache_dir , )
_lowerCAmelCase : Union[str, Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"{data_args.train_split_name}" , cache_dir=model_args.cache_dir , )
# only normalized-inputs-training is supported
_lowerCAmelCase : List[Any] = WavaVecaFeatureExtractor.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=_lowerCamelCase )
def prepare_dataset(_lowerCamelCase ):
# check that all files have the correct sampling rate
_lowerCAmelCase , _lowerCAmelCase : Any = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
return batch
# load audio files into numpy arrays
_lowerCAmelCase : Dict = datasets.map(
_lowerCamelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets["train"].column_names )
# filter audio files that are too long
_lowerCAmelCase : Tuple = vectorized_datasets.filter(
lambda _lowerCamelCase : len(data["speech"] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
def normalize(_lowerCamelCase ):
return feature_extractor(batch["speech"] , sampling_rate=feature_extractor.sampling_rate )
# normalize and transform to `BatchFeatures`
_lowerCAmelCase : Dict = vectorized_datasets.map(
_lowerCamelCase , batched=_lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets["train"].column_names , )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
_lowerCAmelCase : Tuple = WavaVecaConfig.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
raise ValueError(
"PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
" ``config.feat_extract_norm='layer'" )
_lowerCAmelCase : Union[str, Any] = WavaVecaForPreTraining(_lowerCamelCase )
_lowerCAmelCase : int = DataCollatorForWavaVecaPretraining(model=_lowerCamelCase , feature_extractor=_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = WavaVecaPreTrainer(
model=_lowerCamelCase , data_collator=_lowerCamelCase , args=_lowerCamelCase , train_dataset=vectorized_datasets["train"] , eval_dataset=vectorized_datasets["validation"] , tokenizer=_lowerCamelCase , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
trainer.train()
if __name__ == "__main__":
main()
| 300
| 1
|
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
_snake_case = 1.0_5457_1817e-34 # unit of ℏ : J * s
_snake_case = 3e8 # unit of c : m * s^-1
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if (force, area, distance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if force < 0:
raise ValueError("Magnitude of force can not be negative" )
if distance < 0:
raise ValueError("Distance can not be negative" )
if area < 0:
raise ValueError("Area can not be negative" )
if force == 0:
_lowerCAmelCase : Optional[int] = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
240 * (distance) ** 4
)
return {"force": force}
elif area == 0:
_lowerCAmelCase : List[str] = (240 * force * (distance) ** 4) / (
REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
)
return {"area": area}
elif distance == 0:
_lowerCAmelCase : Dict = (
(REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
) ** (1 / 4)
return {"distance": distance}
raise ValueError("One and only one argument must be 0" )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 300
|
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def A ( _lowerCamelCase = "laptop" ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = F"https://www.amazon.in/laptop/s?k={product}"
_lowerCAmelCase : Dict = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
"Accept-Language": "en-US, en;q=0.5",
}
_lowerCAmelCase : Optional[int] = BeautifulSoup(requests.get(_lowerCamelCase , headers=_lowerCamelCase ).text )
# Initialize a Pandas dataframe with the column titles
_lowerCAmelCase : int = DataFrame(
columns=[
"Product Title",
"Product Link",
"Current Price of the product",
"Product Rating",
"MRP of the product",
"Discount",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ):
try:
_lowerCAmelCase : Any = item.ha.text
_lowerCAmelCase : List[str] = "https://www.amazon.in/" + item.ha.a["href"]
_lowerCAmelCase : Any = item.find("span" , attrs={"class": "a-offscreen"} ).text
try:
_lowerCAmelCase : List[str] = item.find("span" , attrs={"class": "a-icon-alt"} ).text
except AttributeError:
_lowerCAmelCase : str = "Not available"
try:
_lowerCAmelCase : Optional[Any] = (
"₹"
+ item.find(
"span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
)
except AttributeError:
_lowerCAmelCase : Optional[Any] = ""
try:
_lowerCAmelCase : int = float(
(
(
float(product_mrp.strip("₹" ).replace("," , "" ) )
- float(product_price.strip("₹" ).replace("," , "" ) )
)
/ float(product_mrp.strip("₹" ).replace("," , "" ) )
)
* 100 )
except ValueError:
_lowerCAmelCase : Optional[Any] = float("nan" )
except AttributeError:
pass
_lowerCAmelCase : Any = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
_lowerCAmelCase : List[str] = " "
_lowerCAmelCase : Tuple = " "
data_frame.index += 1
return data_frame
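# Illustration (added; not part of the original scraper): the discount column is
# simply the relative markdown between the MRP and the current price.
def _illustrative_discount_percent(mrp: float, price: float) -> float:
    return (mrp - price) / mrp * 100  # e.g. mrp=1000, price=750 -> 25.0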
if __name__ == "__main__":
_snake_case = "headphones"
get_amazon_product_data(product).to_csv(f'''Amazon Product Data for {product}.csv''')
| 300
| 1
|
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
_snake_case = ["model.decoder.embed_positions.weights"]
def A ( _lowerCamelCase ):
'''simple docstring'''
if "emb" in name:
_lowerCAmelCase : List[Any] = name.replace("emb" , "model.decoder.embed_tokens" )
if "transformer" in name:
_lowerCAmelCase : Optional[Any] = name.replace("transformer" , "model.decoder" )
if "cross_attention" in name:
_lowerCAmelCase : int = name.replace("cross_attention" , "encoder_attn" )
if "linear1" in name:
_lowerCAmelCase : Dict = name.replace("linear1" , "fc1" )
if "linear2" in name:
_lowerCAmelCase : int = name.replace("linear2" , "fc2" )
if "norm1" in name:
_lowerCAmelCase : Optional[Any] = name.replace("norm1" , "self_attn_layer_norm" )
if "norm_cross" in name:
_lowerCAmelCase : Union[str, Any] = name.replace("norm_cross" , "encoder_attn_layer_norm" )
if "norm2" in name:
_lowerCAmelCase : Tuple = name.replace("norm2" , "final_layer_norm" )
if "out_norm" in name:
_lowerCAmelCase : Any = name.replace("out_norm" , "model.decoder.layer_norm" )
if "linears" in name:
_lowerCAmelCase : Dict = name.replace("linears" , "lm_heads" )
if "condition_provider.conditioners.description.output_proj" in name:
_lowerCAmelCase : Union[str, Any] = name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" )
return name
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[str] = list(state_dict.keys() )
_lowerCAmelCase : str = {}
for key in keys:
_lowerCAmelCase : Optional[int] = state_dict.pop(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = rename_keys(_lowerCamelCase )
if "in_proj_weight" in key:
# split fused qkv proj
_lowerCAmelCase : int = val[:hidden_size, :]
_lowerCAmelCase : Optional[Any] = val[hidden_size : 2 * hidden_size, :]
_lowerCAmelCase : Optional[Any] = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
_lowerCAmelCase : Optional[int] = val
else:
_lowerCAmelCase : Union[str, Any] = val
return state_dict, enc_dec_proj_state_dict
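# Illustration (added): the `in_proj_weight` branch above splits a fused attention
# projection of shape (3 * hidden, hidden) into equal query/key/value blocks.
# A minimal helper showing the same slicing:
def _illustrative_split_fused_qkv(val, hidden_size):
    q = val[:hidden_size, :]
    k = val[hidden_size : 2 * hidden_size, :]
    v = val[-hidden_size:, :]
    return q, k, v  # three (hidden_size, hidden_size) blocks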
def A ( _lowerCamelCase ):
'''simple docstring'''
if checkpoint == "small":
# default config values
_lowerCAmelCase : Optional[int] = 1_024
_lowerCAmelCase : List[str] = 24
_lowerCAmelCase : int = 16
elif checkpoint == "medium":
_lowerCAmelCase : str = 1_536
_lowerCAmelCase : str = 48
_lowerCAmelCase : Union[str, Any] = 24
elif checkpoint == "large":
_lowerCAmelCase : Dict = 2_048
_lowerCAmelCase : Any = 48
_lowerCAmelCase : Optional[Any] = 32
else:
raise ValueError(F"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}." )
_lowerCAmelCase : int = MusicgenDecoderConfig(
hidden_size=_lowerCamelCase , ffn_dim=hidden_size * 4 , num_hidden_layers=_lowerCamelCase , num_attention_heads=_lowerCamelCase , )
return config
@torch.no_grad()
def A ( _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase="cpu" ):
'''simple docstring'''
_lowerCAmelCase : Any = MusicGen.get_pretrained(_lowerCamelCase , device=_lowerCamelCase )
_lowerCAmelCase : int = decoder_config_from_checkpoint(_lowerCamelCase )
_lowerCAmelCase : List[Any] = fairseq_model.lm.state_dict()
_lowerCAmelCase , _lowerCAmelCase : Any = rename_state_dict(
_lowerCamelCase , hidden_size=decoder_config.hidden_size )
_lowerCAmelCase : Optional[Any] = TaEncoderModel.from_pretrained("t5-base" )
_lowerCAmelCase : Tuple = EncodecModel.from_pretrained("facebook/encodec_32khz" )
_lowerCAmelCase : Union[str, Any] = MusicgenForCausalLM(_lowerCamelCase ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = decoder.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
for key in missing_keys.copy():
if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
raise ValueError(F"Missing key(s) in state_dict: {missing_keys}" )
if len(_lowerCamelCase ) > 0:
raise ValueError(F"Unexpected key(s) in state_dict: {unexpected_keys}" )
# init the composite model
_lowerCAmelCase : Any = MusicgenForConditionalGeneration(text_encoder=_lowerCamelCase , audio_encoder=_lowerCamelCase , decoder=_lowerCamelCase )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(_lowerCamelCase )
# check we can do a forward pass
_lowerCAmelCase : Optional[Any] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
_lowerCAmelCase : List[str] = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
_lowerCAmelCase : Dict = model(input_ids=_lowerCamelCase , decoder_input_ids=_lowerCamelCase ).logits
if logits.shape != (8, 1, 2_048):
raise ValueError("Incorrect shape for logits" )
# now construct the processor
_lowerCAmelCase : int = AutoTokenizer.from_pretrained("t5-base" )
_lowerCAmelCase : List[Any] = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" )
_lowerCAmelCase : Tuple = MusicgenProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase )
# set the appropriate bos/pad token ids
_lowerCAmelCase : Optional[Any] = 2_048
_lowerCAmelCase : Any = 2_048
# set other default generation config params
_lowerCAmelCase : Optional[int] = int(30 * audio_encoder.config.frame_rate )
_lowerCAmelCase : Dict = True
_lowerCAmelCase : List[Any] = 3.0
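# Note (added for illustration): Encodec at 32 kHz runs at roughly 50 frames per
# second, so 30 seconds of audio corresponds to max_length ≈ 1_500 generated
# codebook frames per stream.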
if pytorch_dump_folder is not None:
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
logger.info(F"Saving model {checkpoint} to {pytorch_dump_folder}" )
model.save_pretrained(_lowerCamelCase )
processor.save_pretrained(_lowerCamelCase )
if repo_id:
logger.info(F"Pushing model {checkpoint} to {repo_id}" )
model.push_to_hub(_lowerCamelCase )
processor.push_to_hub(_lowerCamelCase )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
_snake_case = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 300
|
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
_lowerCAmelCase : Tuple = (
("layer.", "layer_"),
("word_embeddings.weight", "word_embeddings"),
("position_embeddings.weight", "position_embeddings"),
("token_type_embeddings.weight", "token_type_embeddings"),
(".", "/"),
("LayerNorm/weight", "LayerNorm/gamma"),
("LayerNorm/bias", "LayerNorm/beta"),
("weight", "kernel"),
)
if not os.path.isdir(_lowerCamelCase ):
os.makedirs(_lowerCamelCase )
_lowerCAmelCase : Any = model.state_dict()
def to_tf_var_name(_lowerCamelCase ):
for patt, repl in iter(_lowerCamelCase ):
_lowerCAmelCase : str = name.replace(_lowerCamelCase , _lowerCamelCase )
return F"bert/{name}"
def create_tf_var(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : Optional[Any] = tf.dtypes.as_dtype(tensor.dtype )
_lowerCAmelCase : Optional[int] = tf.get_variable(dtype=_lowerCamelCase , shape=tensor.shape , name=_lowerCamelCase , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(_lowerCamelCase )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
_lowerCAmelCase : Optional[Any] = to_tf_var_name(_lowerCamelCase )
_lowerCAmelCase : Any = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
_lowerCAmelCase : Tuple = torch_tensor.T
_lowerCAmelCase : str = create_tf_var(tensor=_lowerCamelCase , name=_lowerCamelCase , session=_lowerCamelCase )
tf.keras.backend.set_value(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Optional[int] = session.run(_lowerCamelCase )
print(F"Successfully created {tf_name}: {np.allclose(_lowerCamelCase , _lowerCamelCase )}" )
_lowerCAmelCase : List[Any] = tf.train.Saver(tf.trainable_variables() )
saver.save(_lowerCamelCase , os.path.join(_lowerCamelCase , model_name.replace("-" , "_" ) + ".ckpt" ) )
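# Worked example (added for illustration): the replacement patterns above map a
# PyTorch parameter name to its TF checkpoint counterpart, e.g.
#   encoder.layer.0.attention.self.query.weight
#     -> bert/encoder/layer_0/attention/self/query/kernel
# and, since "attention.self.query" is listed in tensors_to_transpose, the tensor
# is transposed before being written to the checkpoint.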
def A ( _lowerCamelCase=None ):
'''simple docstring'''
_lowerCAmelCase : int = argparse.ArgumentParser()
parser.add_argument("--model_name" , type=_lowerCamelCase , required=_lowerCamelCase , help="model name e.g. bert-base-uncased" )
parser.add_argument(
"--cache_dir" , type=_lowerCamelCase , default=_lowerCamelCase , required=_lowerCamelCase , help="Directory containing pytorch model" )
parser.add_argument("--pytorch_model_path" , type=_lowerCamelCase , required=_lowerCamelCase , help="/path/to/<pytorch-model-name>.bin" )
parser.add_argument("--tf_cache_dir" , type=_lowerCamelCase , required=_lowerCamelCase , help="Directory in which to save tensorflow model" )
_lowerCAmelCase : Optional[Any] = parser.parse_args(_lowerCamelCase )
_lowerCAmelCase : List[Any] = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=_lowerCamelCase , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
| 300
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'yolos'
def __init__( self, __a=768, __a=12, __a=12, __a=3072, __a="gelu", __a=0.0, __a=0.0, __a=0.02, __a=1E-12, __a=[512, 864], __a=16, __a=3, __a=True, __a=100, __a=True, __a=False, __a=1, __a=5, __a=2, __a=5, __a=2, __a=0.1, **__a, ):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : int = hidden_size
_lowerCAmelCase : Union[str, Any] = num_hidden_layers
_lowerCAmelCase : Tuple = num_attention_heads
_lowerCAmelCase : int = intermediate_size
_lowerCAmelCase : List[str] = hidden_act
_lowerCAmelCase : Optional[Any] = hidden_dropout_prob
_lowerCAmelCase : Tuple = attention_probs_dropout_prob
_lowerCAmelCase : Optional[int] = initializer_range
_lowerCAmelCase : List[str] = layer_norm_eps
_lowerCAmelCase : Union[str, Any] = image_size
_lowerCAmelCase : Tuple = patch_size
_lowerCAmelCase : str = num_channels
_lowerCAmelCase : List[str] = qkv_bias
_lowerCAmelCase : List[str] = num_detection_tokens
_lowerCAmelCase : Union[str, Any] = use_mid_position_embeddings
_lowerCAmelCase : Optional[int] = auxiliary_loss
# Hungarian matcher
_lowerCAmelCase : Optional[Any] = class_cost
_lowerCAmelCase : Dict = bbox_cost
_lowerCAmelCase : Optional[int] = giou_cost
# Loss coefficients
_lowerCAmelCase : Optional[Any] = bbox_loss_coefficient
_lowerCAmelCase : int = giou_loss_coefficient
_lowerCAmelCase : Dict = eos_coefficient
class UpperCAmelCase_ ( a):
lowerCamelCase__ = version.parse('1.11')
@property
def snake_case__ ( self):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
])
@property
def snake_case__ ( self):
'''simple docstring'''
return 1E-4
@property
def snake_case__ ( self):
'''simple docstring'''
return 12
| 300
|
class UpperCAmelCase_ :
def __init__( self):
'''simple docstring'''
_lowerCAmelCase : Dict = 0
_lowerCAmelCase : Optional[int] = 0
_lowerCAmelCase : Tuple = {}
def snake_case__ ( self, __a):
'''simple docstring'''
if vertex not in self.adjacency:
_lowerCAmelCase : List[Any] = {}
self.num_vertices += 1
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
self.add_vertex(__a)
self.add_vertex(__a)
if head == tail:
return
_lowerCAmelCase : Dict = weight
_lowerCAmelCase : Dict = weight
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.get_edges()
for edge in edges:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Dict = edge
edges.remove((tail, head, weight))
for i in range(len(__a)):
_lowerCAmelCase : Optional[int] = list(edges[i])
edges.sort(key=lambda __a: e[2])
for i in range(len(__a) - 1):
if edges[i][2] >= edges[i + 1][2]:
_lowerCAmelCase : Tuple = edges[i][2] + 1
for edge in edges:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Dict = edge
_lowerCAmelCase : Union[str, Any] = weight
_lowerCAmelCase : Optional[int] = weight
def __str__( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = ""
for tail in self.adjacency:
for head in self.adjacency[tail]:
_lowerCAmelCase : List[Any] = self.adjacency[head][tail]
string += f"{head} -> {tail} == {weight}\n"
return string.rstrip("\n")
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]))
return output
def snake_case__ ( self):
'''simple docstring'''
return self.adjacency.keys()
@staticmethod
def snake_case__ ( __a=None, __a=None):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = Graph()
if vertices is None:
_lowerCAmelCase : Any = []
if edges is None:
_lowerCAmelCase : Any = []
for vertex in vertices:
g.add_vertex(__a)
for edge in edges:
g.add_edge(*__a)
return g
class UpperCAmelCase_ :
def __init__( self):
'''simple docstring'''
_lowerCAmelCase : Dict = {}
_lowerCAmelCase : List[Any] = {}
def __len__( self):
'''simple docstring'''
return len(self.parent)
def snake_case__ ( self, __a):
'''simple docstring'''
if item in self.parent:
return self.find(__a)
_lowerCAmelCase : Optional[int] = item
_lowerCAmelCase : Any = 0
return item
def snake_case__ ( self, __a):
'''simple docstring'''
if item not in self.parent:
return self.make_set(__a)
if item != self.parent[item]:
_lowerCAmelCase : Any = self.find(self.parent[item])
return self.parent[item]
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.find(__a)
_lowerCAmelCase : List[str] = self.find(__a)
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
_lowerCAmelCase : Any = roota
return roota
if self.rank[roota] < self.rank[roota]:
_lowerCAmelCase : List[Any] = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
_lowerCAmelCase : int = roota
return roota
return None
@staticmethod
def snake_case__ ( __a):
'''simple docstring'''
_lowerCAmelCase : Tuple = graph.num_vertices
_lowerCAmelCase : Optional[int] = Graph.UnionFind()
_lowerCAmelCase : str = []
while num_components > 1:
_lowerCAmelCase : List[str] = {}
for vertex in graph.get_vertices():
_lowerCAmelCase : Optional[Any] = -1
_lowerCAmelCase : Union[str, Any] = graph.get_edges()
for edge in edges:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : str = edge
edges.remove((tail, head, weight))
for edge in edges:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = edge
_lowerCAmelCase : Dict = union_find.find(__a)
_lowerCAmelCase : Optional[Any] = union_find.find(__a)
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_lowerCAmelCase : Union[str, Any] = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_lowerCAmelCase : Tuple = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = cheap_edge[vertex]
if union_find.find(__a) != union_find.find(__a):
union_find.union(__a, __a)
mst_edges.append(cheap_edge[vertex])
_lowerCAmelCase : Any = num_components - 1
_lowerCAmelCase : List[str] = Graph.build(edges=__a)
return mst
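# Worked example (added for illustration): on the triangle graph with edges
# (1, 2, w=1), (2, 3, w=2), (1, 3, w=3), the first Boruvka pass records each
# component's cheapest incident edge: vertices 1 and 2 both pick (1, 2, 1),
# vertex 3 picks (2, 3, 2). Union-find merges all three vertices in a single
# round, so the MST is {(1, 2, 1), (2, 3, 2)} with total weight 3.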
| 300
| 1
|
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'AutoTokenizer'
lowerCamelCase__ = ['tokenizer']
lowerCamelCase__ = {
'semantic_prompt': 1,
'coarse_prompt': 2,
'fine_prompt': 2,
}
def __init__( self, __a, __a=None):
'''simple docstring'''
super().__init__(__a)
_lowerCAmelCase : Any = speaker_embeddings
@classmethod
def snake_case__ ( cls, __a, __a="speaker_embeddings_path.json", **__a):
'''simple docstring'''
if speaker_embeddings_dict_path is not None:
_lowerCAmelCase : int = get_file_from_repo(
__a, __a, subfolder=kwargs.pop("subfolder", __a), cache_dir=kwargs.pop("cache_dir", __a), force_download=kwargs.pop("force_download", __a), proxies=kwargs.pop("proxies", __a), resume_download=kwargs.pop("resume_download", __a), local_files_only=kwargs.pop("local_files_only", __a), use_auth_token=kwargs.pop("use_auth_token", __a), revision=kwargs.pop("revision", __a), )
if speaker_embeddings_path is None:
logger.warning(
f"`{os.path.join(__a, __a)}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.")
_lowerCAmelCase : Dict = None
else:
with open(__a) as speaker_embeddings_json:
_lowerCAmelCase : List[Any] = json.load(__a)
else:
_lowerCAmelCase : Any = None
_lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(__a, **__a)
return cls(tokenizer=__a, speaker_embeddings=__a)
def snake_case__ ( self, __a, __a="speaker_embeddings_path.json", __a="speaker_embeddings", __a = False, **__a, ):
'''simple docstring'''
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(__a, __a, "v2"), exist_ok=__a)
_lowerCAmelCase : int = {}
_lowerCAmelCase : Union[str, Any] = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
_lowerCAmelCase : Union[str, Any] = self._load_voice_preset(__a)
_lowerCAmelCase : List[str] = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["repo_or_path"], __a, f"{prompt_key}_{key}"), voice_preset[key], allow_pickle=__a, )
_lowerCAmelCase : List[Any] = os.path.join(__a, f"{prompt_key}_{key}.npy")
_lowerCAmelCase : Optional[int] = tmp_dict
with open(os.path.join(__a, __a), "w") as fp:
json.dump(__a, __a)
super().save_pretrained(__a, __a, **__a)
def snake_case__ ( self, __a = None, **__a):
'''simple docstring'''
_lowerCAmelCase : int = self.speaker_embeddings[voice_preset]
_lowerCAmelCase : Optional[int] = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].")
_lowerCAmelCase : Tuple = get_file_from_repo(
self.speaker_embeddings.get("repo_or_path", "/"), voice_preset_paths[key], subfolder=kwargs.pop("subfolder", __a), cache_dir=kwargs.pop("cache_dir", __a), force_download=kwargs.pop("force_download", __a), proxies=kwargs.pop("proxies", __a), resume_download=kwargs.pop("resume_download", __a), local_files_only=kwargs.pop("local_files_only", __a), use_auth_token=kwargs.pop("use_auth_token", __a), revision=kwargs.pop("revision", __a), )
if path is None:
raise ValueError(
f"`{os.path.join(self.speaker_embeddings.get('repo_or_path', '/'), voice_preset_paths[key])}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.")
_lowerCAmelCase : Tuple = np.load(__a)
return voice_preset_dict
def snake_case__ ( self, __a = None):
'''simple docstring'''
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(f"Voice preset unrecognized, missing {key} as a key.")
if not isinstance(voice_preset[key], np.ndarray):
raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")
if len(voice_preset[key].shape) != self.preset_shape[key]:
raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")
def __call__( self, __a=None, __a=None, __a="pt", __a=256, __a=False, __a=True, __a=False, **__a, ):
'''simple docstring'''
if voice_preset is not None and not isinstance(__a, __a):
if (
isinstance(__a, __a)
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
_lowerCAmelCase : Union[str, Any] = self._load_voice_preset(__a)
else:
if isinstance(__a, __a) and not voice_preset.endswith(".npz"):
_lowerCAmelCase : Dict = voice_preset + ".npz"
_lowerCAmelCase : Union[str, Any] = np.load(__a)
if voice_preset is not None:
self._validate_voice_preset_dict(__a, **__a)
_lowerCAmelCase : Optional[int] = BatchFeature(data=__a, tensor_type=__a)
_lowerCAmelCase : Tuple = self.tokenizer(
__a, return_tensors=__a, padding="max_length", max_length=__a, return_attention_mask=__a, return_token_type_ids=__a, add_special_tokens=__a, **__a, )
if voice_preset is not None:
_lowerCAmelCase : Tuple = voice_preset
return encoded_text
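# Illustrative usage (added; mirrors the upstream Bark processor this class
# corresponds to, so treat the identifiers as assumptions):
# processor = BarkProcessor.from_pretrained("suno/bark-small")
# inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")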
| 300
|
_snake_case = 8.3144598
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if temperature < 0:
raise Exception("Temperature cannot be less than 0 K" )
if molar_mass <= 0:
raise Exception("Molar mass cannot be less than or equal to 0 kg/mol" )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
_snake_case = 300
_snake_case = 0.028  # molar mass of N2 in kg/mol (28 g/mol)
_snake_case = rms_speed_of_molecule(temperature, molar_mass)
print(f'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
| 300
| 1
|
def A ( _lowerCamelCase = 50 ):
'''simple docstring'''
_lowerCAmelCase : List[str] = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
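# Sanity check (added for illustration): a row either starts with a unit square
# or with a tile of length 2-4, so the count follows the recurrence
#   f(n) = f(n-1) + f(n-2) + f(n-3) + f(n-4)  with f(0) = 1,
# giving 2, 4, 8 and 15 for lengths 2, 3, 4 and 5.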
| 300
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'wav2vec2'
def __init__( self, __a=32, __a=768, __a=12, __a=12, __a=3072, __a="gelu", __a=0.1, __a=0.1, __a=0.1, __a=0.0, __a=0.0, __a=0.1, __a=0.1, __a=0.02, __a=1E-5, __a="group", __a="gelu", __a=(512, 512, 512, 512, 512, 512, 512), __a=(5, 2, 2, 2, 2, 2, 2), __a=(10, 3, 3, 3, 3, 2, 2), __a=False, __a=128, __a=16, __a=False, __a=True, __a=0.05, __a=10, __a=2, __a=0.0, __a=10, __a=0, __a=320, __a=2, __a=0.1, __a=100, __a=256, __a=256, __a=0.1, __a="sum", __a=False, __a=False, __a=256, __a=(512, 512, 512, 512, 1500), __a=(5, 3, 3, 1, 1), __a=(1, 2, 3, 1, 1), __a=512, __a=0, __a=1, __a=2, __a=False, __a=3, __a=2, __a=3, __a=None, __a=None, **__a, ):
'''simple docstring'''
super().__init__(**__a, pad_token_id=__a, bos_token_id=__a, eos_token_id=__a)
_lowerCAmelCase : str = hidden_size
_lowerCAmelCase : Optional[int] = feat_extract_norm
_lowerCAmelCase : Dict = feat_extract_activation
_lowerCAmelCase : Any = list(__a)
_lowerCAmelCase : List[str] = list(__a)
_lowerCAmelCase : List[Any] = list(__a)
_lowerCAmelCase : List[str] = conv_bias
_lowerCAmelCase : Optional[Any] = num_conv_pos_embeddings
_lowerCAmelCase : Dict = num_conv_pos_embedding_groups
_lowerCAmelCase : Any = len(self.conv_dim)
_lowerCAmelCase : Union[str, Any] = num_hidden_layers
_lowerCAmelCase : int = intermediate_size
_lowerCAmelCase : List[Any] = hidden_act
_lowerCAmelCase : Any = num_attention_heads
_lowerCAmelCase : List[str] = hidden_dropout
_lowerCAmelCase : Tuple = attention_dropout
_lowerCAmelCase : List[Any] = activation_dropout
_lowerCAmelCase : Dict = feat_proj_dropout
_lowerCAmelCase : Optional[int] = final_dropout
_lowerCAmelCase : Dict = layerdrop
_lowerCAmelCase : Tuple = layer_norm_eps
_lowerCAmelCase : Tuple = initializer_range
_lowerCAmelCase : int = vocab_size
_lowerCAmelCase : Tuple = do_stable_layer_norm
_lowerCAmelCase : Any = use_weighted_layer_sum
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
f" `len(config.conv_kernel) = {len(self.conv_kernel)}`.")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowerCAmelCase : Optional[int] = apply_spec_augment
_lowerCAmelCase : int = mask_time_prob
_lowerCAmelCase : str = mask_time_length
_lowerCAmelCase : int = mask_time_min_masks
_lowerCAmelCase : List[Any] = mask_feature_prob
_lowerCAmelCase : List[Any] = mask_feature_length
_lowerCAmelCase : List[Any] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_lowerCAmelCase : int = num_codevectors_per_group
_lowerCAmelCase : List[str] = num_codevector_groups
_lowerCAmelCase : List[Any] = contrastive_logits_temperature
_lowerCAmelCase : int = feat_quantizer_dropout
_lowerCAmelCase : Any = num_negatives
_lowerCAmelCase : Dict = codevector_dim
_lowerCAmelCase : Any = proj_codevector_dim
_lowerCAmelCase : Optional[int] = diversity_loss_weight
# ctc loss
_lowerCAmelCase : Optional[Any] = ctc_loss_reduction
_lowerCAmelCase : str = ctc_zero_infinity
# adapter
_lowerCAmelCase : Optional[Any] = add_adapter
_lowerCAmelCase : Tuple = adapter_kernel_size
_lowerCAmelCase : str = adapter_stride
_lowerCAmelCase : List[Any] = num_adapter_layers
_lowerCAmelCase : str = output_hidden_size or hidden_size
_lowerCAmelCase : List[str] = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_lowerCAmelCase : List[str] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_lowerCAmelCase : int = list(__a)
_lowerCAmelCase : Dict = list(__a)
_lowerCAmelCase : Dict = list(__a)
_lowerCAmelCase : Tuple = xvector_output_dim
@property
def snake_case__ ( self):
'''simple docstring'''
return functools.reduce(operator.mul, self.conv_stride, 1)
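# Quick check (added for illustration): with the default conv_stride of
# (5, 2, 2, 2, 2, 2, 2), the property above evaluates to 5 * 2**6 == 320, i.e.
# one encoder frame per 320 input samples, or 20 ms of audio at 16 kHz.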
| 300
| 1
|
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
_snake_case = float("nan")
class UpperCAmelCase_ :
def __init__( self, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = sys.stdout
_lowerCAmelCase : Dict = open(__a, "a")
def __getattr__( self, __a):
'''simple docstring'''
return getattr(self.stdout, __a)
def snake_case__ ( self, __a):
'''simple docstring'''
self.stdout.write(__a)
# strip tqdm codes
self.file.write(re.sub(R"^.*\r", "", __a, 0, re.M))
def A ( _lowerCamelCase=80 , _lowerCamelCase=False ):
'''simple docstring'''
_lowerCAmelCase : List[str] = []
# deal with critical env vars
_lowerCAmelCase : List[Any] = ["CUDA_VISIBLE_DEVICES"]
for key in env_keys:
_lowerCAmelCase : List[str] = os.environ.get(_lowerCamelCase , _lowerCamelCase )
if val is not None:
cmd.append(F"{key}={val}" )
# python executable (not always needed if the script is executable)
_lowerCAmelCase : Tuple = sys.executable if full_python_path else sys.executable.split("/" )[-1]
cmd.append(_lowerCamelCase )
# now the normal args
cmd += list(map(shlex.quote , sys.argv ) )
# split up into up to MAX_WIDTH lines with shell multi-line escapes
_lowerCAmelCase : Dict = []
_lowerCAmelCase : str = ""
while len(_lowerCamelCase ) > 0:
current_line += F"{cmd.pop(0 )} "
if len(_lowerCamelCase ) == 0 or len(_lowerCamelCase ) + len(cmd[0] ) + 1 > max_width - 1:
lines.append(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = ""
return "\\\n".join(_lowerCamelCase )
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = re.sub(r"[\\\n]+" , " " , args.base_cmd )
# remove --output_dir if any and set our own
_lowerCAmelCase : int = re.sub("--output_dir\s+[^\s]+" , "" , args.base_cmd )
args.base_cmd += F" --output_dir {output_dir}"
# ensure we have --overwrite_output_dir
_lowerCAmelCase : Dict = re.sub("--overwrite_output_dir\s+" , "" , args.base_cmd )
args.base_cmd += " --overwrite_output_dir"
return [sys.executable] + shlex.split(args.base_cmd )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if 0:
import random
from time import sleep
sleep(0 )
return dict(
{k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 1_00.2, 55.66_66, 2_22.22_22_22_22] )} , )
_lowerCAmelCase : Any = subprocess.run(_lowerCamelCase , capture_output=_lowerCamelCase , text=_lowerCamelCase )
if verbose:
print("STDOUT" , result.stdout )
print("STDERR" , result.stderr )
# save the streams
_lowerCAmelCase : List[str] = variation.replace(" " , "-" )
with open(Path(_lowerCamelCase ) / F"log.{prefix}.stdout.txt" , "w" ) as f:
f.write(result.stdout )
with open(Path(_lowerCamelCase ) / F"log.{prefix}.stderr.txt" , "w" ) as f:
f.write(result.stderr )
if result.returncode != 0:
if verbose:
print("failed" )
return {target_metric_key: nan}
with io.open(F"{output_dir}/all_results.json" , "r" , encoding="utf-8" ) as f:
_lowerCAmelCase : Union[str, Any] = json.load(_lowerCamelCase )
# filter out just the keys we want
return {k: v for k, v in metrics.items() if k in metric_keys}
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : Tuple = []
_lowerCAmelCase : Optional[int] = F"{id}: {variation:<{longest_variation_len}}"
_lowerCAmelCase : Union[str, Any] = F"{preamble}: "
_lowerCAmelCase : int = set(report_metric_keys + [target_metric_key] )
for i in tqdm(range(_lowerCamelCase ) , desc=_lowerCamelCase , leave=_lowerCamelCase ):
_lowerCAmelCase : Tuple = process_run_single(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : List[Any] = single_run_metrics[target_metric_key]
if not math.isnan(_lowerCamelCase ):
metrics.append(_lowerCamelCase )
results.append(_lowerCamelCase )
outcome += "✓"
else:
outcome += "✘"
_lowerCAmelCase : List[str] = F"\33[2K\r{outcome}"
if len(_lowerCamelCase ) > 0:
_lowerCAmelCase : str = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
_lowerCAmelCase : Dict = round(mean_metrics[target_metric_key] , 2 )
_lowerCAmelCase : Tuple = F"{outcome} {mean_target}"
if len(_lowerCamelCase ) > 1:
results_str += F" {tuple(round(_lowerCamelCase , 2 ) for x in results )}"
print(_lowerCamelCase )
_lowerCAmelCase : Tuple = variation
return mean_metrics
else:
print(_lowerCamelCase )
return {variation_key: variation, target_metric_key: nan}
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = torch.cuda.get_device_properties(torch.device("cuda" ) )
return F"\nDatetime : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' )}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n"
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[str] = pd.DataFrame(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = "variation"
_lowerCAmelCase : List[Any] = "diff_%"
_lowerCAmelCase : Tuple = nan
if base_variation is not None and len(df[df[variation_key] == base_variation] ):
# this may still return nan
_lowerCAmelCase : str = df.loc[df[variation_key] == base_variation][target_metric_key].item()
if math.isnan(_lowerCamelCase ):
# as a fallback, use the minimal value as the sentinel
_lowerCAmelCase : Optional[Any] = df.loc[df[target_metric_key] != nan][target_metric_key].min()
# create diff column if possible
if not math.isnan(_lowerCamelCase ):
_lowerCAmelCase : Dict = df.apply(
lambda _lowerCamelCase : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value )
if not math.isnan(r[target_metric_key] )
else 0 , axis="columns" , )
# re-order columns
_lowerCAmelCase : int = [variation_key, target_metric_key, diff_key, *report_metric_keys]
_lowerCAmelCase : Dict = df.reindex(_lowerCamelCase , axis="columns" ) # reorder cols
# capitalize
_lowerCAmelCase : str = df.rename(str.capitalize , axis="columns" )
# make the cols as narrow as possible
_lowerCAmelCase : str = df.rename(lambda _lowerCamelCase : c.replace("_" , "<br>" ) , axis="columns" )
_lowerCAmelCase : List[str] = df.rename(lambda _lowerCamelCase : c.replace("_" , "\n" ) , axis="columns" )
_lowerCAmelCase : List[Any] = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results:", df_github.to_markdown(index=_lowerCamelCase , floatfmt=".2f" )]
report += ["```"]
report += ["*** Setup:", get_versions()]
report += ["*** The benchmark command line was:", get_original_command()]
report += ["```"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results (console):", df_console.to_markdown(index=_lowerCamelCase , floatfmt=".2f" )]
print("\n\n".join(_lowerCamelCase ) )
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Dict = argparse.ArgumentParser()
parser.add_argument(
"--base-cmd" , default=_lowerCamelCase , type=_lowerCamelCase , required=_lowerCamelCase , help="Base cmd" , )
parser.add_argument(
"--variations" , default=_lowerCamelCase , type=_lowerCamelCase , nargs="+" , required=_lowerCamelCase , help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'" , )
parser.add_argument(
"--base-variation" , default=_lowerCamelCase , type=_lowerCamelCase , help="Baseline variation to compare to. if None the minimal target value will be used to compare against" , )
parser.add_argument(
"--target-metric-key" , default=_lowerCamelCase , type=_lowerCamelCase , required=_lowerCamelCase , help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second" , )
parser.add_argument(
"--report-metric-keys" , default="" , type=_lowerCamelCase , help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples" , )
parser.add_argument(
"--repeat-times" , default=1 , type=_lowerCamelCase , help="How many times to re-run each variation - an average will be reported" , )
parser.add_argument(
"--output_dir" , default="output_benchmark" , type=_lowerCamelCase , help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked" , )
parser.add_argument(
"--verbose" , default=_lowerCamelCase , action="store_true" , help="Whether to show the outputs of each run or just the benchmark progress" , )
_lowerCAmelCase : List[str] = parser.parse_args()
_lowerCAmelCase : Optional[Any] = args.output_dir
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
_lowerCAmelCase : Tuple = get_base_command(_lowerCamelCase , _lowerCamelCase )
# split each dimension into its --foo variations
_lowerCAmelCase : Optional[int] = [list(map(str.strip , re.split(r"\|" , _lowerCamelCase ) ) ) for x in args.variations]
# build a cartesian product of dimensions and convert those back into cmd-line arg strings,
# while stripping white space for inputs that were empty
_lowerCAmelCase : Any = list(map(str.strip , map(" ".join , itertools.product(*_lowerCamelCase ) ) ) )
_lowerCAmelCase : Optional[int] = max(len(_lowerCamelCase ) for x in variations )
# split wanted keys
_lowerCAmelCase : Union[str, Any] = args.report_metric_keys.split()
# capture prints into a log file for convenience
_lowerCAmelCase : str = F"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S' )}.txt"
print(F"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt" )
print(F"and this script's output is also piped into {report_fn}" )
_lowerCAmelCase : Any = Tee(_lowerCamelCase )
print(F"\n*** Running {len(_lowerCamelCase )} benchmarks:" )
print(F"Base command: {' '.join(_lowerCamelCase )}" )
_lowerCAmelCase : List[Any] = "variation"
_lowerCAmelCase : int = []
for id, variation in enumerate(tqdm(_lowerCamelCase , desc="Total completion: " , leave=_lowerCamelCase ) ):
_lowerCAmelCase : List[str] = base_cmd + variation.split()
results.append(
process_run(
id + 1 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , args.target_metric_key , _lowerCamelCase , args.repeat_times , _lowerCamelCase , args.verbose , ) )
process_results(_lowerCamelCase , args.target_metric_key , _lowerCamelCase , args.base_variation , _lowerCamelCase )
if __name__ == "__main__":
main()
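# Illustration (added): how the variation dimensions expand into run configs,
# mirroring the itertools.product call in main():
#   dims = [["--tf32 0", "--tf32 1"], ["", "--fp16", "--bf16"]]
#   [" ".join(p).strip() for p in itertools.product(*dims)]
#   -> ['--tf32 0', '--tf32 0 --fp16', '--tf32 0 --bf16',
#       '--tf32 1', '--tf32 1 --fp16', '--tf32 1 --bf16']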
| 300
|
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
_snake_case = False
try:
_snake_case = _is_package_available("google.colab")
except ModuleNotFoundError:
pass
@input.register
class UpperCAmelCase_ :
def __init__( self, __a = None, __a = []):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = 0
_lowerCAmelCase : Optional[int] = choices
_lowerCAmelCase : Tuple = prompt
if sys.platform == "win32":
_lowerCAmelCase : Optional[Any] = "*"
else:
_lowerCAmelCase : Dict = "➔ "
def snake_case__ ( self, __a, __a = ""):
'''simple docstring'''
if sys.platform != "win32":
writeColor(self.choices[index], 32, __a)
else:
forceWrite(self.choices[index], __a)
def snake_case__ ( self, __a):
'''simple docstring'''
if index == self.position:
forceWrite(f" {self.arrow_char} ")
self.write_choice(__a)
else:
forceWrite(f" {self.choices[index]}")
reset_cursor()
def snake_case__ ( self, __a, __a = 1):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(__a)
move_cursor(__a, direction.name)
self.print_choice(self.position)
@input.mark(KEYMAP["up"])
def snake_case__ ( self):
'''simple docstring'''
self.move_direction(Direction.UP)
@input.mark(KEYMAP["down"])
def snake_case__ ( self):
'''simple docstring'''
self.move_direction(Direction.DOWN)
@input.mark(KEYMAP["newline"])
def snake_case__ ( self):
'''simple docstring'''
move_cursor(len(self.choices) - self.position, "DOWN")
return self.position
@input.mark(KEYMAP["interrupt"])
def snake_case__ ( self):
'''simple docstring'''
move_cursor(len(self.choices) - self.position, "DOWN")
raise KeyboardInterrupt
@input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = int(chr(self.current_selection))
_lowerCAmelCase : List[str] = index - self.position
if index == self.position:
return
if index < len(self.choices):
if self.position > index:
self.move_direction(Direction.UP, -movement)
elif self.position < index:
self.move_direction(Direction.DOWN, __a)
else:
return
else:
return
def snake_case__ ( self, __a = 0):
'''simple docstring'''
if self.prompt:
linebreak()
forceWrite(self.prompt, "\n")
if in_colab:
forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
else:
forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
_lowerCAmelCase : List[Any] = default_choice
for i in range(len(self.choices)):
self.print_choice(__a)
forceWrite("\n")
move_cursor(len(self.choices) - self.position, "UP")
with cursor.hide():
while True:
if in_colab:
try:
_lowerCAmelCase : str = int(builtins.input())
except ValueError:
_lowerCAmelCase : List[Any] = default_choice
else:
_lowerCAmelCase : List[str] = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices) + 1):
move_cursor(1, "UP")
clear_line()
self.write_choice(__a, "\n")
return choice
| 300
| 1
|
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 10**-10 ):
'''simple docstring'''
_lowerCAmelCase : Dict = a
while True:
_lowerCAmelCase : List[str] = Decimal(_lowerCamelCase ) - (
Decimal(eval(_lowerCamelCase ) ) / Decimal(eval(str(diff(_lowerCamelCase ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(_lowerCamelCase ) ) < precision: # noqa: S307
return float(_lowerCamelCase )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
# Find root of polynomial
print(f'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}''')
# Find root of log(x) - 1 = 0 (i.e. the number e)
print(f'''The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}''')
# Exponential Roots
print(f'''The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}''')
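# Convergence note: Newton-Raphson iterates x_{n+1} = x_n - f(x_n)/f'(x_n), roughly
# doubling the number of correct digits per step, so the default 1e-10 precision is
# typically reached within half a dozen iterations given a sensible starting guess, e.g.
#   newton_raphson("x**2 - 2", 1)  # ~1.41421356..., i.e. sqrt(2)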
| 300
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_snake_case = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["BeitFeatureExtractor"]
_snake_case = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_beit"] = [
"BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BeitForImageClassification",
"BeitForMaskedImageModeling",
"BeitForSemanticSegmentation",
"BeitModel",
"BeitPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_beit"] = [
"FlaxBeitForImageClassification",
"FlaxBeitForMaskedImageModeling",
"FlaxBeitModel",
"FlaxBeitPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
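# Hedged note on the pattern above: at import time only the `_import_structure` mapping of
# strings is built; the _LazyModule registered into sys.modules resolves, e.g.,
#   from transformers.models.beit import BeitForImageClassification
# on first attribute access, so torch/flax are imported only if those classes are used.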
| 300
| 1
|
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"feature request",
"wip",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state="closed")
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Add stale comment
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored.")
if __name__ == "__main__":
main()
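# Operational sketch (hedged; the file name and schedule are assumptions): this script is
# meant to run on a cron (e.g. a daily GitHub Actions job) with a token exported:
#   GITHUB_TOKEN=<token> python stale.py
# Issues carrying any label in LABELS_TO_EXEMPT are never closed or marked stale.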
| 300
|
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0):
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self):
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self):
        return str(self)

    def validate_indices(self, loc) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc):
        assert self.validate_indices(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc, value):
        assert self.validate_indices(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another):
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self):
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another):
        return self + (-another)

    def __mul__(self, another):
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self):
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u, v):
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vectors
        assert u.column == v.column == 1  # u, v should be column vectors
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))


# Testing
if __name__ == "__main__":

    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
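# Sherman-Morrison in the notation used above (for reference):
#     (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)
# sherman_morrison() assumes `self` already holds A^(-1), which is why the update needs
# only matrix products and a scalar division, never a fresh inversion.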
| 300
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_bridgetower": [
"BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BridgeTowerConfig",
"BridgeTowerTextConfig",
"BridgeTowerVisionConfig",
],
"processing_bridgetower": ["BridgeTowerProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["BridgeTowerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bridgetower"] = [
"BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST",
"BridgeTowerForContrastiveLearning",
"BridgeTowerForImageAndTextRetrieval",
"BridgeTowerForMaskedLM",
"BridgeTowerModel",
"BridgeTowerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 300
|
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in data_files."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
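# Usage sketch (hedged; assumes this is the packaged "pandas" builder from datasets):
#   df.to_pickle("train.pkl")
#   ds = datasets.load_dataset("pandas", data_files={"train": "train.pkl"})
# Each pickled DataFrame becomes one Arrow table via pa.Table.from_pandas in
# _generate_tables, with an optional schema cast in _cast_table.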
| 300
| 1
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}
class InstructBlipVisionConfig(PretrainedConfig):
    model_type = "instructblip_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)
class InstructBlipQFormerConfig(PretrainedConfig):
    model_type = "instructblip_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)
class InstructBlipConfig(PretrainedConfig):
    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")
        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config, qformer_config, text_config, **kwargs):
        """Instantiate an InstructBlipConfig from vision, Q-Former and language-model configs."""
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
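# Composition sketch (hedged):
#   config = InstructBlipConfig()  # defaults to an OPT text backbone
#   assert config.qformer_config.encoder_hidden_size == config.vision_config.hidden_size
# __init__ rewires the Q-Former's cross-attention width to the vision tower's hidden
# size, which is why encoder_hidden_size defaults to 1408 in InstructBlipQFormerConfig.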
| 300
|
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFMobileBertModel,
            "fill-mask": TFMobileBertForMaskedLM,
            "question-answering": TFMobileBertForQuestionAnswering,
            "text-classification": TFMobileBertForSequenceClassification,
            "token-classification": TFMobileBertForTokenClassification,
            "zero-shot": TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
        return inputs_dict
    class TFMobileBertModelTester(object):
        def __init__(
            self,
            parent,
            batch_size=13,
            seq_length=7,
            is_training=True,
            use_input_mask=True,
            use_token_type_ids=True,
            use_labels=True,
            vocab_size=99,
            hidden_size=32,
            embedding_size=32,
            num_hidden_layers=2,
            num_attention_heads=4,
            intermediate_size=37,
            hidden_act="gelu",
            hidden_dropout_prob=0.1,
            attention_probs_dropout_prob=0.1,
            max_position_embeddings=512,
            type_vocab_size=16,
            type_sequence_label_size=2,
            initializer_range=0.02,
            num_labels=3,
            num_choices=4,
            scope=None,
        ):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size

        def prepare_config_and_inputs(self):
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])
            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)
            config = MobileBertConfig(
                vocab_size=self.vocab_size,
                hidden_size=self.hidden_size,
                num_hidden_layers=self.num_hidden_layers,
                num_attention_heads=self.num_attention_heads,
                intermediate_size=self.intermediate_size,
                hidden_act=self.hidden_act,
                hidden_dropout_prob=self.hidden_dropout_prob,
                attention_probs_dropout_prob=self.attention_probs_dropout_prob,
                max_position_embeddings=self.max_position_embeddings,
                type_vocab_size=self.type_vocab_size,
                initializer_range=self.initializer_range,
                embedding_size=self.embedding_size,
            )
            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        def create_and_check_mobilebert_model(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertModel(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            inputs = [input_ids, input_mask]
            result = model(inputs)
            result = model(input_ids)
            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

        def create_and_check_mobilebert_for_masked_lm(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

        def create_and_check_mobilebert_for_next_sequence_prediction(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_pretraining(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForPreTraining(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_sequence_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        def create_and_check_mobilebert_for_multiple_choice(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

        def create_and_check_mobilebert_for_token_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

        def create_and_check_mobilebert_for_question_answering(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict
    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ])
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
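# Hedged reproduction note: expected_slice holds regression targets captured from the
# released checkpoint; re-deriving them is just
#   model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
#   print(model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0][:, :3, :3])
# and the 1e-4 atol absorbs framework-level numeric drift.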
| 300
| 1
|
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiplication only for 2x2 matrices."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple[list, list, list, list]:
    """Split a matrix with even dimensions into four quadrants."""
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")
    matrix_length = len(a)
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]
    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))


def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """Recursive core of Strassen's scheme: 7 sub-products instead of the naive 8."""
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)
    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)
    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))
    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)
    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix


def strassen(matrixa: list, matrixb: list) -> list:
    """Pad both matrices with zeros up to the next power of two, multiply, then trim."""
    if matrix_dimensions(matrixa)[1] != matrix_dimensions(matrixb)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrixa}\n"
            f"Matrix B: {matrixb}"
        )
        raise Exception(msg)
    dimensiona = matrix_dimensions(matrixa)
    dimensionb = matrix_dimensions(matrixb)
    if dimensiona[0] == dimensiona[1] and dimensionb[0] == dimensionb[1]:
        return [matrixa, matrixb]
    maxim = max(*dimensiona, *dimensionb)
    maxim = int(math.pow(2, math.ceil(math.log2(maxim))))
    new_matrixa = matrixa
    new_matrixb = matrixb
    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimensiona[0]:
            for _ in range(dimensiona[1], maxim):
                new_matrixa[i].append(0)
        else:
            new_matrixa.append([0] * maxim)
        if i < dimensionb[0]:
            for _ in range(dimensionb[1], maxim):
                new_matrixb[i].append(0)
        else:
            new_matrixb.append([0] * maxim)
    final_matrix = actual_strassen(new_matrixa, new_matrixb)
    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimensiona[0]:
            for _ in range(dimensionb[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    matrixa = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrixb = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrixa, matrixb))
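# Complexity note: Strassen trades the 8 recursive block products of the naive scheme for
# the 7 products t1..t7 above, giving O(n^log2(7)) ~= O(n^2.807) instead of O(n^3); the
# zero-padding in strassen() is what lets actual_strassen() assume power-of-two sizes.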
| 300
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
_snake_case = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(_snake_case)  # the RAG usage docstring defined above
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache
        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(cls, question_encoder_config, generator_config, **kwargs):
        """Instantiate a RagConfig (or derived class) from a question-encoder and a generator config."""
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
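# Composition sketch (hedged): a RagConfig is normally assembled from two sub-configs,
#   rag_config = RagConfig.from_question_encoder_generator_configs(dpr_cfg, bart_cfg, n_docs=5)
# and to_dict() re-nests them under the "question_encoder"/"generator" keys that
# __init__ pops back out, so the config round-trips through serialization.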
| 300
| 1
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
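# Subclass sketch (hedged; DownloadCommand and its wiring are illustrative only):
#   class DownloadCommand(BaseTransformersCLICommand):
#       @staticmethod
#       def register_subcommand(parser: ArgumentParser):
#           download_parser = parser.add_parser("download")
#           download_parser.set_defaults(func=lambda args: DownloadCommand())
#
#       def run(self):
#           ...  # perform the download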
| 300
| 1
|
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
"merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"ctrl": 256,
}
CONTROL_CODES = {
"Pregnancy": 16_8629,
"Christianity": 7675,
"Explain": 10_6423,
"Fitness": 6_3440,
"Saving": 6_3163,
"Ask": 2_7171,
"Ass": 9_5985,
"Joke": 16_3509,
"Questions": 4_5622,
"Thoughts": 4_9605,
"Retail": 5_2342,
"Feminism": 16_4338,
"Writing": 1_1992,
"Atheism": 19_2263,
"Netflix": 4_8616,
"Computing": 3_9639,
"Opinion": 4_3213,
"Alone": 4_4967,
"Funny": 5_8917,
"Gaming": 4_0358,
"Human": 4088,
"India": 1331,
"Joker": 7_7138,
"Diet": 3_6206,
"Legal": 1_1859,
"Norman": 4939,
"Tip": 7_2689,
"Weight": 5_2343,
"Movies": 4_6273,
"Running": 2_3425,
"Science": 2090,
"Horror": 3_7793,
"Confession": 6_0572,
"Finance": 1_2250,
"Politics": 1_6360,
"Scary": 19_1985,
"Support": 1_2654,
"Technologies": 3_2516,
"Teenage": 6_6160,
"Event": 3_2769,
"Learned": 6_7460,
"Notion": 18_2770,
"Wikipedia": 3_7583,
"Books": 6665,
"Extract": 7_6050,
"Confessions": 10_2701,
"Conspiracy": 7_5932,
"Links": 6_3674,
"Narcissus": 15_0425,
"Relationship": 5_4766,
"Relationships": 13_4796,
"Reviews": 4_1671,
"News": 4256,
"Translation": 2_6820,
"multilingual": 12_8406,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs


class CTRLTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string: split on whitespace, then BPE-split each word."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Convert a sequence of tokens back into a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
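# BPE walk-through (a sketch; real merges depend on the loaded merges.txt): bpe("lower")
# starts from ('l', 'o', 'w', 'e', 'r</w>') and repeatedly fuses the lowest-ranked
# adjacent pair, e.g. -> ('lo', 'w', 'e', 'r</w>') -> ('low', 'e', 'r</w>') -> ...;
# the surviving pieces are joined with "@@ ", which is exactly what
# convert_tokens_to_string deletes to invert the tokenization.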
| 300
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """Build the (old_key, new_key) pairs mapping timm ViT weights to the HF layout."""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    """Copy/paste/tweak a timm ViT checkpoint into the transformers design."""
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_patch16_224",
type=str,
help="Name of the ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
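    # Example invocation (the script filename below is illustrative):
    #   python convert_vit_timm_to_pytorch.py --vit_name vit_base_patch16_224 --pytorch_dump_folder_path ./vit-base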
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = "\\n@misc{wu2016googles,\n    title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n    author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n    and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n    Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n    Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n    Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n    and Jeffrey Dean},\n    year={2016},\n    eprint={1609.08144},\n    archivePrefix={arXiv},\n    primaryClass={cs.CL}\n}\n"
_DESCRIPTION = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
_KWARGS_DESCRIPTION = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n    predictions (list of str): list of translations to score.\n        Each translation should be tokenized into a list of tokens.\n    references (list of list of str): list of lists of references for each translation.\n        Each reference should be tokenized into a list of tokens.\n    min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n    max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n    'google_bleu': google_bleu score\n\nExamples:\n    Example 1:\n    >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n    ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n    ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n    >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n    ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n    ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n    >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n    ... 'interested', 'in', 'world', 'history']\n    >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n    ... 'because', 'he', 'read', 'the', 'book']\n\n    >>> list_of_references = [[ref1a], [ref2a]]\n    >>> hypotheses = [hyp1, hyp2]\n    >>> google_bleu = datasets.load_metric(\"google_bleu\")\n    >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n    >>> print(round(results[\"google_bleu\"], 2))\n    0.44\n\n    Example 2:\n    >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n    ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n    ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n    >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n    ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n    ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n    >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n    ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n    ... 'heed', 'the', 'cat', 'commands']\n    >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n    ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n    ... 'of', 'the', 'cat']\n\n    >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n    ... 'interested', 'in', 'world', 'history']\n    >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n    ... 'because', 'he', 'read', 'the', 'book']\n\n    >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n    >>> hypotheses = [hyp1, hyp2]\n    >>> google_bleu = datasets.load_metric(\"google_bleu\")\n    >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n    >>> print(round(results[\"google_bleu\"], 2))\n    0.61\n\n    Example 3:\n    >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n    ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n    ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n    >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n    ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n    ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n    >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n    ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n    ... 'heed', 'the', 'cat', 'commands']\n    >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n    ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n    ... 'of', 'the', 'cat']\n\n    >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n    ... 'interested', 'in', 'world', 'history']\n    >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n    ... 'because', 'he', 'read', 'the', 'book']\n\n    >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n    >>> hypotheses = [hyp1, hyp2]\n    >>> google_bleu = datasets.load_metric(\"google_bleu\")\n    >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n    >>> print(round(results[\"google_bleu\"], 2))\n    0.53\n\n    Example 4:\n    >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n    ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n    ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n    >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n    ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n    ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n    >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n    ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n    ... 'heed', 'the', 'cat', 'commands']\n    >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n    ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n    ... 'of', 'the', 'cat']\n\n    >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n    ... 'interested', 'in', 'world', 'history']\n    >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n    ... 'because', 'he', 'read', 'the', 'book']\n\n    >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n    >>> hypotheses = [hyp1, hyp2]\n    >>> google_bleu = datasets.load_metric(\"google_bleu\")\n    >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n    >>> print(round(results[\"google_bleu\"], 2))\n    0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class UpperCAmelCase_ ( datasets.Metric):
def snake_case__ ( self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"),
}), )
def snake_case__ ( self, __a, __a, __a = 1, __a = 4, ):
'''simple docstring'''
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=__a, hypotheses=__a, min_len=__a, max_len=__a)
}
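# For intuition, a minimal re-implementation of the per-sentence quantity that
# corpus_gleu aggregates (a single non-empty reference is assumed; the names
# `_ngram_counts` and `sentence_gleu_sketch` are illustrative, not part of this
# metric's API):
from collections import Counter
def _ngram_counts(tokens, min_len=1, max_len=4):
    # multiset of all n-grams of order min_len..max_len in the token list
    return Counter(
        tuple(tokens[i : i + n])
        for n in range(min_len, max_len + 1)
        for i in range(len(tokens) - n + 1)
    )
def sentence_gleu_sketch(hypothesis, reference, min_len=1, max_len=4):
    hyp = _ngram_counts(hypothesis, min_len, max_len)
    ref = _ngram_counts(reference, min_len, max_len)
    matches = sum((hyp & ref).values())  # clipped n-gram matches
    # GLEU = min(precision, recall) = matches / max(#hyp n-grams, #ref n-grams)
    return matches / max(sum(hyp.values()), sum(ref.values()))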
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( a):
def __init__( self, *__a, **__a):
'''simple docstring'''
warnings.warn(
"The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use VideoMAEImageProcessor instead.", __a, )
super().__init__(*__a, **__a)
from math import factorial
def solution(num: int = 100) -> int:
    """Return the sum of the digits of num! (Project Euler problem 20)."""
    return sum(int(digit) for digit in str(factorial(num)))
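# With the default argument, solution() returns 648, the digit sum of 100!.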
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase):
@property
def snake_case__ ( self):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = ort.SessionOptions()
_lowerCAmelCase : int = False
return options
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png")
_lowerCAmelCase : List[str] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png")
_lowerCAmelCase : List[str] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy")
# using the PNDM scheduler by default
_lowerCAmelCase : Optional[int] = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
"CompVis/stable-diffusion-v1-4", revision="onnx", safety_checker=__a, feature_extractor=__a, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=__a)
_lowerCAmelCase : Any = "A red cat sitting on a park bench"
_lowerCAmelCase : Optional[Any] = np.random.RandomState(0)
_lowerCAmelCase : Any = pipe(
prompt=__a, image=__a, mask_image=__a, strength=0.75, guidance_scale=7.5, num_inference_steps=15, generator=__a, output_type="np", )
_lowerCAmelCase : Optional[int] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 1E-2
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class UpperCAmelCase_ ( a):
def __init__( self, __a, __a=None, __a=None, __a=0):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = 1.0 if scale is None else scale
_lowerCAmelCase : Union[str, Any] = 0.0 if loc is None else loc
super().__init__(__a, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=__a)])
@property
def snake_case__ ( self):
'''simple docstring'''
return self.base_dist.mean * self.scale + self.loc
@property
def snake_case__ ( self):
'''simple docstring'''
return self.base_dist.variance * self.scale**2
@property
def snake_case__ ( self):
'''simple docstring'''
return self.variance.sqrt()
class UpperCAmelCase_ ( nn.Module):
def __init__( self, __a, __a, __a, **__a):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : Optional[int] = args_dim
_lowerCAmelCase : int = nn.ModuleList([nn.Linear(__a, __a) for dim in args_dim.values()])
_lowerCAmelCase : str = domain_map
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = [proj(__a) for proj in self.proj]
return self.domain_map(*__a)
class UpperCAmelCase_ ( nn.Module):
def __init__( self, __a):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Dict = function
def snake_case__ ( self, __a, *__a):
'''simple docstring'''
return self.function(__a, *__a)
class UpperCAmelCase_ :
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = 42
def __init__( self, __a = 1):
'''simple docstring'''
_lowerCAmelCase : int = dim
_lowerCAmelCase : Union[str, Any] = {k: dim * self.args_dim[k] for k in self.args_dim}
def snake_case__ ( self, __a):
'''simple docstring'''
if self.dim == 1:
return self.distribution_class(*__a)
else:
return Independent(self.distribution_class(*__a), 1)
def snake_case__ ( self, __a, __a = None, __a = None, ):
'''simple docstring'''
_lowerCAmelCase : int = self._base_distribution(__a)
if loc is None and scale is None:
return distr
else:
return AffineTransformed(__a, loc=__a, scale=__a, event_dim=self.event_dim)
@property
def snake_case__ ( self):
'''simple docstring'''
return () if self.dim == 1 else (self.dim,)
@property
def snake_case__ ( self):
'''simple docstring'''
return len(self.event_shape)
@property
def snake_case__ ( self):
'''simple docstring'''
return 0.0
def snake_case__ ( self, __a):
'''simple docstring'''
return ParameterProjection(
in_features=__a, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map), )
def snake_case__ ( self, *__a):
'''simple docstring'''
raise NotImplementedError()
@staticmethod
def snake_case__ ( __a):
'''simple docstring'''
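        # "squareplus" maps any real x to a positive value via (x + sqrt(x**2 + 4)) / 2,
        # a smooth softplus-like rectifier used here to keep distribution parameters positive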
return (x + torch.sqrt(torch.square(__a) + 4.0)) / 2.0
class UpperCAmelCase_ ( a):
lowerCamelCase__ = {"df": 1, "loc": 1, "scale": 1}
lowerCamelCase__ = StudentT
@classmethod
def snake_case__ ( cls, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[Any] = cls.squareplus(__a).clamp_min(torch.finfo(scale.dtype).eps)
_lowerCAmelCase : str = 2.0 + cls.squareplus(__a)
return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class UpperCAmelCase_ ( a):
lowerCamelCase__ = {"loc": 1, "scale": 1}
lowerCamelCase__ = Normal
@classmethod
def snake_case__ ( cls, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Dict = cls.squareplus(__a).clamp_min(torch.finfo(scale.dtype).eps)
return loc.squeeze(-1), scale.squeeze(-1)
class UpperCAmelCase_ ( a):
lowerCamelCase__ = {"total_count": 1, "logits": 1}
lowerCamelCase__ = NegativeBinomial
@classmethod
def snake_case__ ( cls, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[Any] = cls.squareplus(__a)
return total_count.squeeze(-1), logits.squeeze(-1)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : List[Any] = distr_args
if self.dim == 1:
return self.distribution_class(total_count=__a, logits=__a)
else:
return Independent(self.distribution_class(total_count=__a, logits=__a), 1)
def snake_case__ ( self, __a, __a = None, __a = None):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : List[str] = distr_args
if scale is not None:
# See scaling property of Gamma.
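            # (shifting the logits by log(scale) multiplies the negative binomial's mean,
            # total_count * exp(logits) in this parametrization, by `scale`)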
logits += scale.log()
return self._base_distribution((total_count, logits))
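# A minimal usage sketch, written against the original (unmangled) names these
# classes carry in transformers' time-series models -- e.g. `StudentTOutput` and
# its `get_parameter_projection` method; both names are assumptions here:
#
#     distr_output = StudentTOutput(dim=1)
#     proj = distr_output.get_parameter_projection(in_features=32)
#     df, loc, scale = proj(torch.randn(8, 32))  # raw features -> constrained params
#     distr = distr_output.distribution((df, loc, scale))
#     sample = distr.sample()  # shape (8,)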
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = tempfile.mkdtemp()
# fmt: off
_lowerCAmelCase : Optional[Any] = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
_lowerCAmelCase : Optional[Any] = dict(zip(__a, range(len(__a))))
_lowerCAmelCase : int = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
_lowerCAmelCase : Optional[Any] = {"unk_token": "<unk>"}
_lowerCAmelCase : Any = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
_lowerCAmelCase : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file, "w", encoding="utf-8") as fp:
fp.write(json.dumps(__a) + "\n")
with open(self.merges_file, "w", encoding="utf-8") as fp:
fp.write("\n".join(__a))
_lowerCAmelCase : List[str] = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
_lowerCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname, __a)
with open(self.image_processor_file, "w", encoding="utf-8") as fp:
json.dump(__a, __a)
def snake_case__ ( self, **__a):
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname, **__a)
def snake_case__ ( self, **__a):
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **__a)
def snake_case__ ( self, **__a):
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname, **__a)
def snake_case__ ( self):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta)]
_lowerCAmelCase : Optional[int] = [Image.fromarray(np.moveaxis(__a, 0, -1)) for x in image_inputs]
return image_inputs
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.get_tokenizer()
_lowerCAmelCase : Optional[int] = self.get_rust_tokenizer()
_lowerCAmelCase : Dict = self.get_image_processor()
_lowerCAmelCase : Any = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
processor_slow.save_pretrained(self.tmpdirname)
_lowerCAmelCase : Tuple = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=__a)
_lowerCAmelCase : str = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
processor_fast.save_pretrained(self.tmpdirname)
_lowerCAmelCase : Any = CLIPSegProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer, __a)
self.assertIsInstance(processor_fast.tokenizer, __a)
self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor, __a)
self.assertIsInstance(processor_fast.image_processor, __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
_lowerCAmelCase : Any = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
_lowerCAmelCase : Tuple = self.get_image_processor(do_normalize=__a, padding_value=1.0)
_lowerCAmelCase : Union[str, Any] = CLIPSegProcessor.from_pretrained(
self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=__a, padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer, __a)
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor, __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.get_image_processor()
_lowerCAmelCase : Dict = self.get_tokenizer()
_lowerCAmelCase : Union[str, Any] = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : List[str] = self.prepare_image_inputs()
_lowerCAmelCase : List[str] = image_processor(__a, return_tensors="np")
_lowerCAmelCase : Optional[Any] = processor(images=__a, return_tensors="np")
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.get_image_processor()
_lowerCAmelCase : Tuple = self.get_tokenizer()
_lowerCAmelCase : Dict = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : Union[str, Any] = "lower newer"
_lowerCAmelCase : List[str] = processor(text=__a)
_lowerCAmelCase : List[Any] = tokenizer(__a)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.get_image_processor()
_lowerCAmelCase : Any = self.get_tokenizer()
_lowerCAmelCase : Dict = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : int = "lower newer"
_lowerCAmelCase : List[Any] = self.prepare_image_inputs()
_lowerCAmelCase : Any = processor(text=__a, images=__a)
self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])
# test if it raises when no input is passed
with pytest.raises(__a):
processor()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.get_image_processor()
_lowerCAmelCase : int = self.get_tokenizer()
_lowerCAmelCase : Any = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : Dict = self.prepare_image_inputs()
_lowerCAmelCase : Optional[Any] = self.prepare_image_inputs()
_lowerCAmelCase : Any = processor(images=__a, visual_prompt=__a)
self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])
# test if it raises when no input is passed
with pytest.raises(__a):
processor()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = self.get_image_processor()
_lowerCAmelCase : Any = self.get_tokenizer()
_lowerCAmelCase : Any = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCAmelCase : List[str] = processor.batch_decode(__a)
_lowerCAmelCase : List[Any] = tokenizer.batch_decode(__a)
self.assertListEqual(__a, __a)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"uclanlp/visualbert-vqa": "https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json",
"uclanlp/visualbert-vqa-pre": "https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json",
"uclanlp/visualbert-vqa-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"
),
"uclanlp/visualbert-vcr": "https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json",
"uclanlp/visualbert-vcr-pre": "https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json",
"uclanlp/visualbert-vcr-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"
),
"uclanlp/visualbert-nlvr2": "https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json",
"uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json",
"uclanlp/visualbert-nlvr2-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'visual_bert'
def __init__( self, __a=3_0522, __a=768, __a=512, __a=12, __a=12, __a=3072, __a="gelu", __a=0.1, __a=0.1, __a=512, __a=2, __a=0.02, __a=1E-12, __a=False, __a=True, __a=1, __a=0, __a=2, **__a, ):
'''simple docstring'''
super().__init__(pad_token_id=__a, bos_token_id=__a, eos_token_id=__a, **__a)
_lowerCAmelCase : Union[str, Any] = vocab_size
_lowerCAmelCase : int = max_position_embeddings
_lowerCAmelCase : Any = hidden_size
_lowerCAmelCase : Optional[Any] = visual_embedding_dim
_lowerCAmelCase : List[str] = num_hidden_layers
_lowerCAmelCase : str = num_attention_heads
_lowerCAmelCase : List[str] = intermediate_size
_lowerCAmelCase : Any = hidden_act
_lowerCAmelCase : int = hidden_dropout_prob
_lowerCAmelCase : Tuple = attention_probs_dropout_prob
_lowerCAmelCase : List[str] = initializer_range
_lowerCAmelCase : int = type_vocab_size
_lowerCAmelCase : Tuple = layer_norm_eps
_lowerCAmelCase : List[Any] = bypass_transformer
_lowerCAmelCase : Tuple = special_visual_initialize
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_snake_case = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class UpperCAmelCase_ ( unittest.TestCase):
lowerCamelCase__ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
lowerCamelCase__ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
lowerCamelCase__ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
lowerCamelCase__ = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
        _lowerCAmelCase : str = ZeroShotClassificationPipeline(
            model=__a, tokenizer=__a, candidate_labels=["politics", "health"])
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[Any] = classifier("Who are you voting for in 2020?", candidate_labels="politics")
self.assertEqual(__a, {"sequence": ANY(__a), "labels": [ANY(__a)], "scores": [ANY(__a)]})
# No kwarg
_lowerCAmelCase : int = classifier("Who are you voting for in 2020?", ["politics"])
self.assertEqual(__a, {"sequence": ANY(__a), "labels": [ANY(__a)], "scores": [ANY(__a)]})
_lowerCAmelCase : Tuple = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
self.assertEqual(__a, {"sequence": ANY(__a), "labels": [ANY(__a)], "scores": [ANY(__a)]})
_lowerCAmelCase : List[Any] = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
self.assertEqual(
__a, {"sequence": ANY(__a), "labels": [ANY(__a), ANY(__a)], "scores": [ANY(__a), ANY(__a)]})
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)
_lowerCAmelCase : List[str] = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
self.assertEqual(
__a, {"sequence": ANY(__a), "labels": [ANY(__a), ANY(__a)], "scores": [ANY(__a), ANY(__a)]})
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)
_lowerCAmelCase : List[Any] = classifier(
"Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}")
self.assertEqual(__a, {"sequence": ANY(__a), "labels": [ANY(__a)], "scores": [ANY(__a)]})
# https://github.com/huggingface/transformers/issues/13846
_lowerCAmelCase : Optional[int] = classifier(["I am happy"], ["positive", "negative"])
self.assertEqual(
__a, [
{"sequence": ANY(__a), "labels": [ANY(__a), ANY(__a)], "scores": [ANY(__a), ANY(__a)]}
for i in range(1)
], )
_lowerCAmelCase : Any = classifier(["I am happy", "I am sad"], ["positive", "negative"])
self.assertEqual(
__a, [
{"sequence": ANY(__a), "labels": [ANY(__a), ANY(__a)], "scores": [ANY(__a), ANY(__a)]}
for i in range(2)
], )
with self.assertRaises(__a):
classifier("", candidate_labels="politics")
with self.assertRaises(__a):
classifier(__a, candidate_labels="politics")
with self.assertRaises(__a):
classifier("Who are you voting for in 2020?", candidate_labels="")
with self.assertRaises(__a):
classifier("Who are you voting for in 2020?", candidate_labels=__a)
with self.assertRaises(__a):
classifier(
"Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="Not formatting template", )
with self.assertRaises(__a):
classifier(
"Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template=__a, )
self.run_entailment_id(__a)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Tuple = zero_shot_classifier.model.config
_lowerCAmelCase : Optional[Any] = config.labelaid
_lowerCAmelCase : Union[str, Any] = zero_shot_classifier.entailment_id
_lowerCAmelCase : Any = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
self.assertEqual(zero_shot_classifier.entailment_id, -1)
_lowerCAmelCase : Optional[int] = {"entailment": 0, "neutral": 1, "contradiction": 2}
self.assertEqual(zero_shot_classifier.entailment_id, 0)
_lowerCAmelCase : Optional[int] = {"ENTAIL": 0, "NON-ENTAIL": 1}
self.assertEqual(zero_shot_classifier.entailment_id, 0)
_lowerCAmelCase : Optional[Any] = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
self.assertEqual(zero_shot_classifier.entailment_id, 2)
_lowerCAmelCase : List[str] = original_labelaid
self.assertEqual(__a, zero_shot_classifier.entailment_id)
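        # i.e. `entailment_id` falls back to -1 when no label name contains "entail",
        # and otherwise is the id of the (case-insensitive) "entail*" label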
@require_torch
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = pipeline(
"zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt", )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"Who are you voting for in 2020?" * 100, candidate_labels=["politics", "public health", "science"])
@require_torch
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = pipeline(
"zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt", )
_lowerCAmelCase : List[Any] = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
self.assertEqual(
nested_simplify(__a), {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.333, 0.333, 0.333],
}, )
@require_tf
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = pipeline(
"zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="tf", )
_lowerCAmelCase : Union[str, Any] = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
self.assertEqual(
nested_simplify(__a), {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.333, 0.333, 0.333],
}, )
@slow
@require_torch
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
_lowerCAmelCase : Optional[Any] = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
self.assertEqual(
nested_simplify(__a), {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.976, 0.015, 0.009],
}, )
_lowerCAmelCase : Union[str, Any] = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data.", candidate_labels=["machine learning", "statistics", "translation", "vision"], multi_label=__a, )
self.assertEqual(
nested_simplify(__a), {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.817, 0.713, 0.018, 0.018],
}, )
@slow
@require_tf
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
_lowerCAmelCase : Dict = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
self.assertEqual(
nested_simplify(__a), {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.976, 0.015, 0.009],
}, )
_lowerCAmelCase : str = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data.", candidate_labels=["machine learning", "statistics", "translation", "vision"], multi_label=__a, )
self.assertEqual(
nested_simplify(__a), {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.817, 0.713, 0.018, 0.018],
}, )
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
do_only_config = False
do_only_weights = True
do_only_renaming = False
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--repo_path",
default=None,
type=str,
required=True,
help="The config json file corresponding to the architecture.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()
    config_parameters_to_change = {
"image_size": "sample_size",
"num_res_blocks": "layers_per_block",
"block_channels": "block_out_channels",
"down_blocks": "down_block_types",
"up_blocks": "up_block_types",
"downscale_freq_shift": "freq_shift",
"resnet_num_groups": "norm_num_groups",
"resnet_act_fn": "act_fn",
"resnet_eps": "norm_eps",
"num_head_channels": "attention_head_dim",
}
    key_parameters_to_change = {
"time_steps": "time_proj",
"mid": "mid_block",
"downsample_blocks": "down_blocks",
"upsample_blocks": "up_blocks",
}
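    # e.g. a parameter named "downsample_blocks.0.resnets.0.conv1.weight" in the old
    # checkpoint becomes "down_blocks.0.resnets.0.conv1.weight" under the new naming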
_snake_case = "" if has_file(args.repo_path, "config.json") else "unet"
with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, "config.json"):
        model = UNetaDModel(**config)
else:
        class_name = UNetaDConditionModel if "ldm-text2im-large-256" in args.repo_path else UNetaDModel
        model = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
    config = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
                config[value] = config[key]
del config[key]
_snake_case = [k.replace("UNetRes", "") for k in config["down_block_types"]]
_snake_case = [k.replace("UNetRes", "") for k in config["up_block_types"]]
if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))
        new_state_dict = {}
for param_key, param_value in state_dict.items():
if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
continue
            has_changed = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
if not has_changed:
                new_state_dict[param_key] = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 42
lowerCamelCase__ = 42
class UpperCAmelCase_ ( a , a):
lowerCamelCase__ = 1
@register_to_config
def __init__( self, __a = 2000, __a = 0.15, __a = 0.01, __a = 1_348.0, __a = 1E-5, __a = 1, ):
'''simple docstring'''
_lowerCAmelCase : Dict = sigma_max
# setable values
_lowerCAmelCase : str = None
self.set_sigmas(__a, __a, __a, __a)
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
return sample
def snake_case__ ( self, __a, __a = None, __a = None):
'''simple docstring'''
_lowerCAmelCase : int = sampling_eps if sampling_eps is not None else self.config.sampling_eps
_lowerCAmelCase : Dict = torch.linspace(1, __a, __a, device=__a)
def snake_case__ ( self, __a, __a = None, __a = None, __a = None):
'''simple docstring'''
_lowerCAmelCase : List[str] = sigma_min if sigma_min is not None else self.config.sigma_min
_lowerCAmelCase : Tuple = sigma_max if sigma_max is not None else self.config.sigma_max
_lowerCAmelCase : str = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(__a, __a)
_lowerCAmelCase : int = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
_lowerCAmelCase : Any = torch.exp(torch.linspace(math.log(__a), math.log(__a), __a))
_lowerCAmelCase : int = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])
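        # the sigmas follow a geometric progression from sigma_min up to sigma_max,
        # i.e. the noise schedule of the variance-exploding (VE) SDE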
def snake_case__ ( self, __a, __a):
'''simple docstring'''
return torch.where(
timesteps == 0, torch.zeros_like(t.to(timesteps.device)), self.discrete_sigmas[timesteps - 1].to(timesteps.device), )
def snake_case__ ( self, __a, __a, __a, __a = None, __a = True, ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
_lowerCAmelCase : Dict = timestep * torch.ones(
sample.shape[0], device=sample.device) # torch.repeat_interleave(timestep, sample.shape[0])
_lowerCAmelCase : Dict = (timestep * (len(self.timesteps) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
_lowerCAmelCase : Union[str, Any] = timesteps.to(self.discrete_sigmas.device)
_lowerCAmelCase : Any = self.discrete_sigmas[timesteps].to(sample.device)
_lowerCAmelCase : List[Any] = self.get_adjacent_sigma(__a, __a).to(sample.device)
_lowerCAmelCase : List[str] = torch.zeros_like(__a)
_lowerCAmelCase : Union[str, Any] = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
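        # concretely, one reverse-time ancestral step computed below is
        #   x_{t-1} = x_t + (sigma_t**2 - sigma_{t-1}**2) * score(x_t, t) + sqrt(sigma_t**2 - sigma_{t-1}**2) * z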
_lowerCAmelCase : Union[str, Any] = diffusion.flatten()
while len(diffusion.shape) < len(sample.shape):
_lowerCAmelCase : Optional[int] = diffusion.unsqueeze(-1)
_lowerCAmelCase : Dict = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
_lowerCAmelCase : Optional[Any] = randn_tensor(
sample.shape, layout=sample.layout, generator=__a, device=sample.device, dtype=sample.dtype)
_lowerCAmelCase : int = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
_lowerCAmelCase : Tuple = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=__a, prev_sample_mean=__a)
def snake_case__ ( self, __a, __a, __a = None, __a = True, ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
_lowerCAmelCase : Union[str, Any] = randn_tensor(sample.shape, layout=sample.layout, generator=__a).to(sample.device)
# compute step size from the model_output, the noise, and the snr
_lowerCAmelCase : Any = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
_lowerCAmelCase : Dict = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
_lowerCAmelCase : Optional[Any] = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
_lowerCAmelCase : Dict = step_size * torch.ones(sample.shape[0]).to(sample.device)
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
_lowerCAmelCase : List[Any] = step_size.flatten()
while len(step_size.shape) < len(sample.shape):
_lowerCAmelCase : int = step_size.unsqueeze(-1)
_lowerCAmelCase : List[Any] = sample + step_size * model_output
_lowerCAmelCase : Tuple = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__a)
def snake_case__ ( self, __a, __a, __a, ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = timesteps.to(original_samples.device)
_lowerCAmelCase : Union[str, Any] = self.discrete_sigmas.to(original_samples.device)[timesteps]
_lowerCAmelCase : Any = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(__a) * sigmas[:, None, None, None]
)
_lowerCAmelCase : int = noise + original_samples
return noisy_samples
def __len__( self):
'''simple docstring'''
return self.config.num_train_timesteps
from collections import defaultdict
class AssignmentUsingBitmask:
    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)
        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]
        self.task = defaultdict(list)  # stores the list of persons for each task
        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1
    def count_ways_until(self, mask, task_no):
        # if all bits are set, every person has been assigned a task: one valid way
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]
        # Number of ways when we don't include this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)
        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)
        # save the value.
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]
    def count_no_of_ways(self, task_performed):
        # store the list of persons that can perform each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)
        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)
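# For task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]] the result is 10: the number
# of ways to give persons 0, 1 and 2 pairwise-distinct tasks from their own lists.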
if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)
    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    """Generate a random password of the given length from letters, digits and punctuation."""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))
def alternative_password_generator(chars_incl: str, i: int) -> str:
    """Generate a password of length i that contains every character in chars_incl."""
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)
# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))
def random_number(chars_incl, i):
    pass  # Put your code here...
def random_letters(chars_incl, i):
    pass  # Put your code here...
def random_characters(chars_incl, i):
    pass  # Put your code here...
def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)
    return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase,
# numbers, and special characters
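# e.g. is_strong_password("Hwea7$2!") is True, while is_strong_password("sheep") is False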
def main():
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(length))
    print("Alternative Password generated:", alternative_password_generator(chars_incl, length))
    print("[If you are thinking of using this password, You better save it.]")
if __name__ == "__main__":
main()
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( a):
lowerCamelCase__ = ['pixel_values']
def __init__( self, __a = True, __a = None, __a = PILImageResampling.BILINEAR, __a = True, __a = None, __a = True, __a = 1 / 255, __a = True, __a = None, __a = None, **__a, ):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : List[Any] = size if size is not None else {"shortest_edge": 256}
_lowerCAmelCase : Dict = get_size_dict(__a, default_to_square=__a)
_lowerCAmelCase : Dict = crop_size if crop_size is not None else {"height": 224, "width": 224}
_lowerCAmelCase : Optional[Any] = get_size_dict(__a, param_name="crop_size")
_lowerCAmelCase : Union[str, Any] = do_resize
_lowerCAmelCase : List[str] = size
_lowerCAmelCase : Tuple = resample
_lowerCAmelCase : List[Any] = do_center_crop
_lowerCAmelCase : Union[str, Any] = crop_size
_lowerCAmelCase : Optional[Any] = do_rescale
_lowerCAmelCase : int = rescale_factor
_lowerCAmelCase : Dict = do_normalize
_lowerCAmelCase : List[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_lowerCAmelCase : Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def snake_case__ ( self, __a, __a, __a = PILImageResampling.BICUBIC, __a = None, **__a, ):
'''simple docstring'''
_lowerCAmelCase : Any = get_size_dict(__a, default_to_square=__a)
if "shortest_edge" not in size:
raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
_lowerCAmelCase : str = get_resize_output_image_size(__a, size=size["shortest_edge"], default_to_square=__a)
return resize(__a, size=__a, resample=__a, data_format=__a, **__a)
def snake_case__ ( self, __a, __a, __a = None, **__a, ):
'''simple docstring'''
_lowerCAmelCase : Tuple = get_size_dict(__a)
if "height" not in size or "width" not in size:
raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
return center_crop(__a, size=(size["height"], size["width"]), data_format=__a, **__a)
def snake_case__ ( self, __a, __a, __a = None, **__a):
'''simple docstring'''
return rescale(__a, scale=__a, data_format=__a, **__a)
def snake_case__ ( self, __a, __a, __a, __a = None, **__a, ):
'''simple docstring'''
return normalize(__a, mean=__a, std=__a, data_format=__a, **__a)
def snake_case__ ( self, __a, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = ChannelDimension.FIRST, **__a, ):
'''simple docstring'''
_lowerCAmelCase : Any = do_resize if do_resize is not None else self.do_resize
_lowerCAmelCase : List[Any] = size if size is not None else self.size
_lowerCAmelCase : str = get_size_dict(__a, default_to_square=__a)
_lowerCAmelCase : Tuple = resample if resample is not None else self.resample
_lowerCAmelCase : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowerCAmelCase : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
_lowerCAmelCase : Any = get_size_dict(__a, param_name="crop_size")
_lowerCAmelCase : int = do_rescale if do_rescale is not None else self.do_rescale
_lowerCAmelCase : int = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCAmelCase : Tuple = do_normalize if do_normalize is not None else self.do_normalize
_lowerCAmelCase : int = image_mean if image_mean is not None else self.image_mean
_lowerCAmelCase : List[str] = image_std if image_std is not None else self.image_std
_lowerCAmelCase : int = make_list_of_images(__a)
if not valid_images(__a):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray.")
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True.")
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True.")
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True.")
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True.")
# All transformations expect numpy arrays.
_lowerCAmelCase : Dict = [to_numpy_array(__a) for image in images]
if do_resize:
_lowerCAmelCase : str = [self.resize(image=__a, size=__a, resample=__a) for image in images]
if do_center_crop:
_lowerCAmelCase : Dict = [self.center_crop(image=__a, size=__a) for image in images]
if do_rescale:
_lowerCAmelCase : Union[str, Any] = [self.rescale(image=__a, scale=__a) for image in images]
if do_normalize:
_lowerCAmelCase : List[str] = [self.normalize(image=__a, mean=__a, std=__a) for image in images]
_lowerCAmelCase : List[Any] = [to_channel_dimension_format(__a, __a) for image in images]
_lowerCAmelCase : Tuple = {"pixel_values": images}
return BatchFeature(data=__a, tensor_type=__a)
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
_lowerCAmelCase : Any = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(__a) != len(__a):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits")
if is_torch_tensor(__a):
_lowerCAmelCase : int = target_sizes.numpy()
_lowerCAmelCase : Any = []
for idx in range(len(__a)):
_lowerCAmelCase : Dict = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=__a)
_lowerCAmelCase : List[Any] = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(__a)
else:
_lowerCAmelCase : str = logits.argmax(dim=1)
_lowerCAmelCase : Union[str, Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
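# Note: the post-processing above upsamples each logit map to its target
# (height, width) with bilinear interpolation and then takes an argmax over
# the class axis, yielding one integer segmentation map per input image.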
| 300
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["ConvNextFeatureExtractor"]
_snake_case = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convnext"] = [
"CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvNextForImageClassification",
"ConvNextModel",
"ConvNextPreTrainedModel",
"ConvNextBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convnext"] = [
"TFConvNextForImageClassification",
"TFConvNextModel",
"TFConvNextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
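    # Note: binding the _LazyModule into sys.modules defers the heavy
    # torch/TF imports above until one of the listed attributes is first
    # accessed, so importing the package itself stays cheap.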
| 300
| 1
|
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class UpperCAmelCase_ ( unittest.TestCase):
    def snake_case__ ( self, seed, shape):
        '''simple docstring'''
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"
def snake_case__ ( self):
'''simple docstring'''
super().tearDown()
gc.collect()
def snake_case__ ( self, __a=0, __a=(4, 4, 64, 64), __a=False):
'''simple docstring'''
_lowerCAmelCase : List[Any] = jnp.bfloataa if fpaa else jnp.floataa
_lowerCAmelCase : str = jnp.array(load_hf_numpy(self.get_file_format(__a, __a)), dtype=__a)
return image
def snake_case__ ( self, __a=False, __a="CompVis/stable-diffusion-v1-4"):
'''simple docstring'''
_lowerCAmelCase : Any = jnp.bfloataa if fpaa else jnp.floataa
_lowerCAmelCase : Any = "bf16" if fpaa else None
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = FlaxUNetaDConditionModel.from_pretrained(
__a, subfolder="unet", dtype=__a, revision=__a)
return model, params
def snake_case__ ( self, __a=0, __a=(4, 77, 768), __a=False):
'''simple docstring'''
_lowerCAmelCase : int = jnp.bfloataa if fpaa else jnp.floataa
_lowerCAmelCase : Any = jnp.array(load_hf_numpy(self.get_file_format(__a, __a)), dtype=__a)
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2_323, -0.1_304, 0.0_813, -0.3_093, -0.0_919, -0.1_571, -0.1_125, -0.5_806]],
[17, 0.55, [-0.0_831, -0.2_443, 0.0_901, -0.0_919, 0.3_396, 0.0_103, -0.3_743, 0.0_701]],
[8, 0.89, [-0.4_863, 0.0_859, 0.0_875, -0.1_658, 0.9_199, -0.0_114, 0.4_839, 0.4_639]],
[3, 1000, [-0.5_649, 0.2_402, -0.5_518, 0.1_248, 1.1_328, -0.2_443, -0.0_325, -1.0_078]],
# fmt: on
])
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : str = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fpaa=__a)
_lowerCAmelCase : Dict = self.get_latents(__a, fpaa=__a)
_lowerCAmelCase : Dict = self.get_encoder_hidden_states(__a, fpaa=__a)
_lowerCAmelCase : Any = model.apply(
{"params": params}, __a, jnp.array(__a, dtype=jnp.intaa), encoder_hidden_states=__a, ).sample
assert sample.shape == latents.shape
_lowerCAmelCase : Tuple = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.floataa)
_lowerCAmelCase : Tuple = jnp.array(__a, dtype=jnp.floataa)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(__a, __a, atol=1E-2)
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1_514, 0.0_807, 0.1_624, 0.1_016, -0.1_896, 0.0_263, 0.0_677, 0.2_310]],
[17, 0.55, [0.1_164, -0.0_216, 0.0_170, 0.1_589, -0.3_120, 0.1_005, -0.0_581, -0.1_458]],
[8, 0.89, [-0.1_758, -0.0_169, 0.1_004, -0.1_411, 0.1_312, 0.1_103, -0.1_996, 0.2_139]],
[3, 1000, [0.1_214, 0.0_352, -0.0_731, -0.1_562, -0.0_994, -0.0_906, -0.2_340, -0.0_539]],
# fmt: on
])
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : Any = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fpaa=__a)
_lowerCAmelCase : Optional[Any] = self.get_latents(__a, shape=(4, 4, 96, 96), fpaa=__a)
_lowerCAmelCase : List[Any] = self.get_encoder_hidden_states(__a, shape=(4, 77, 1024), fpaa=__a)
_lowerCAmelCase : Dict = model.apply(
{"params": params}, __a, jnp.array(__a, dtype=jnp.intaa), encoder_hidden_states=__a, ).sample
assert sample.shape == latents.shape
_lowerCAmelCase : Union[str, Any] = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.floataa)
_lowerCAmelCase : List[str] = jnp.array(__a, dtype=jnp.floataa)
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(__a, __a, atol=1E-2)
| 300
|
from __future__ import annotations
from math import pi
# Define the reduced Planck constant ℏ (h-bar) and the speed of light c,
# used below in the Casimir force formula F = (ℏ c π² A) / (240 d⁴).
_snake_case = 1.054_571_817e-34 # unit of ℏ : J * s
_snake_case = 3e8 # unit of c : m * s^-1
def A ( force , area , distance ):
'''simple docstring'''
if (force, area, distance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if force < 0:
raise ValueError("Magnitude of force can not be negative" )
if distance < 0:
raise ValueError("Distance can not be negative" )
if area < 0:
raise ValueError("Area can not be negative" )
if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
240 * (distance) ** 4
)
return {"force": force}
elif area == 0:
        area = (240 * force * (distance) ** 4) / (
REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
)
return {"area": area}
elif distance == 0:
        distance = (
(REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
) ** (1 / 4)
return {"distance": distance}
raise ValueError("One and only one argument must be 0" )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
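    # Illustrative call (values assumed): with force set to 0, the function
    # solves for the Casimir force between two 4 m^2 plates 2 mm apart.
    print(A(force=0, area=4, distance=0.002))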
| 300
| 1
|
from math import ceil
def A ( n = 1_001 ):
    '''simple docstring'''
    total = 1
    for i in range(1 , int(ceil(n / 2.0 ) ) ):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
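# Worked check: the diagonals of a 5x5 number spiral are
# 1, 3, 5, 7, 9, 13, 17, 21, 25, which sum to 101.
assert A(5) == 101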
if __name__ == "__main__":
import sys
    if len(sys.argv) == 1:
        print(A())
    else:
        try:
            _snake_case = int(sys.argv[1])
            print(A(_snake_case))
        except ValueError:
            print("Invalid entry - please enter a number")
| 300
|
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
_snake_case = True
from torch.cuda.amp import autocast
_snake_case = logging.getLogger(__name__)
@dataclass
class ModelArguments :
lowerCamelCase__ = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
lowerCamelCase__ = field(
default=a , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'Whether to log verbose messages or not.'} , )
lowerCamelCase__ = field(
default=2.0 , metadata={'help': 'Maximum temperature for gumbel softmax.'})
lowerCamelCase__ = field(
default=0.5 , metadata={'help': 'Minimum temperature for gumbel softmax.'})
lowerCamelCase__ = field(
default=0.9_9_9_9_9_5 , metadata={'help': 'Decay of gumbel temperature during training.'})
def configure_logger ( model_args , training_args ):
    '''simple docstring'''
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank ):
        logging_level = logging.INFO
    logger.setLevel(logging_level )
@dataclass
class DataTrainingArguments :
lowerCamelCase__ = field(
default=a , metadata={'help': 'The name of the dataset to use (via the datasets library).'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
lowerCamelCase__ = field(
default='train' , metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} , )
lowerCamelCase__ = field(
default='validation' , metadata={
'help': (
'The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''
)
} , )
lowerCamelCase__ = field(
default='file' , metadata={'help': 'Column in the dataset that contains speech file path. Defaults to \'file\''} , )
lowerCamelCase__ = field(
default=a , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'})
lowerCamelCase__ = field(
default=1 , metadata={
'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
} , )
lowerCamelCase__ = field(
default=a , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
lowerCamelCase__ = field(
default=2_0.0 , metadata={'help': 'Filter audio files that are longer than `max_duration_in_seconds` seconds'})
@dataclass
class DataCollatorForWavaVecaPretraining :
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = "longest"
lowerCamelCase__ = None
lowerCamelCase__ = None
    def __call__( self, features):
        '''simple docstring'''
        batch = self.feature_extractor.pad(
            features, max_length=self.max_length, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
        batch_size = batch["input_values"].shape[0]
        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long)
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device)
            # these two operations make sure that all values
            # before the output length indices are attended to
            attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length), self.model.config.mask_time_prob, self.model.config.mask_time_length, attention_mask=attention_mask, min_masks=2, )
        return batch
class WavaVecaPreTrainer ( Trainer):
    def __init__( self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        '''simple docstring'''
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay
    def snake_case__ ( self, model, inputs):
        '''simple docstring'''
        model.train()
        inputs = self._prepare_inputs(inputs)
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)
        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))
return loss.detach()
def main( ):
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args , training_args )
    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=F"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]" , cache_dir=model_args.cache_dir , )
        datasets["train"] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=F"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]" , cache_dir=model_args.cache_dir , )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split="validation" , cache_dir=model_args.cache_dir , )
        datasets["train"] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=F"{data_args.train_split_name}" , cache_dir=model_args.cache_dir , )
    # only normalized-inputs-training is supported
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=True )
    def prepare_dataset(batch ):
        # check that all files have the correct sampling rate
        batch["speech"] , batch["sampling_rate"] = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
        return batch
    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets["train"].column_names )
    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data : len(data["speech"] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
    def normalize(batch ):
        return feature_extractor(batch["speech"] , sampling_rate=feature_extractor.sampling_rate )
    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets["train"].column_names , )
    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = WavaVecaConfig.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'``" )
    model = WavaVecaForPreTraining(config )
    data_collator = DataCollatorForWavaVecaPretraining(model=model , feature_extractor=feature_extractor )
    trainer = WavaVecaPreTrainer(
        model=model , data_collator=data_collator , args=training_args , train_dataset=vectorized_datasets["train"] , eval_dataset=vectorized_datasets["validation"] , tokenizer=feature_extractor , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
    trainer.train()
if __name__ == "__main__":
main()
| 300
| 1
|
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class UpperCAmelCase_ ( a):
def __init__( self, __a, __a = None, __a = None, __a = True, __a = None, __a = False, __a = None, __a = True, __a = "arrow", **__a, ):
'''simple docstring'''
super().__init__(
split=__a, features=__a, cache_dir=__a, keep_in_memory=__a, streaming=__a, **__a, )
_lowerCAmelCase : Any = load_from_cache_file
_lowerCAmelCase : Any = file_format
_lowerCAmelCase : Optional[int] = Spark(
df=__a, features=__a, cache_dir=__a, working_dir=__a, **__a, )
def snake_case__ ( self):
'''simple docstring'''
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split)
_lowerCAmelCase : Tuple = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=__a, file_format=self._file_format, )
return self.builder.as_dataset(split=self.split)
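# Note: with `streaming=True` the reader yields an iterable dataset without
# materializing Arrow files first; otherwise `download_and_prepare` writes
# the dataset to cache, reused on later runs unless `load_from_cache_file`
# is disabled, which forces a re-download and re-preparation.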
| 300
|
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def A ( _lowerCamelCase = "laptop" ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = F"https://www.amazon.in/laptop/s?k={product}"
_lowerCAmelCase : Dict = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
"Accept-Language": "en-US, en;q=0.5",
}
_lowerCAmelCase : Optional[int] = BeautifulSoup(requests.get(_lowerCamelCase , headers=_lowerCamelCase ).text )
# Initialize a Pandas dataframe with the column titles
_lowerCAmelCase : int = DataFrame(
columns=[
"Product Title",
"Product Link",
"Current Price of the product",
"Product Rating",
"MRP of the product",
"Discount",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ):
try:
_lowerCAmelCase : Any = item.ha.text
_lowerCAmelCase : List[str] = "https://www.amazon.in/" + item.ha.a["href"]
_lowerCAmelCase : Any = item.find("span" , attrs={"class": "a-offscreen"} ).text
try:
_lowerCAmelCase : List[str] = item.find("span" , attrs={"class": "a-icon-alt"} ).text
except AttributeError:
_lowerCAmelCase : str = "Not available"
try:
_lowerCAmelCase : Optional[Any] = (
"₹"
+ item.find(
"span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
)
except AttributeError:
_lowerCAmelCase : Optional[Any] = ""
try:
_lowerCAmelCase : int = float(
(
(
float(product_mrp.strip("₹" ).replace("," , "" ) )
- float(product_price.strip("₹" ).replace("," , "" ) )
)
/ float(product_mrp.strip("₹" ).replace("," , "" ) )
)
* 100 )
except ValueError:
_lowerCAmelCase : Optional[Any] = float("nan" )
except AttributeError:
pass
_lowerCAmelCase : Any = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
_lowerCAmelCase : List[str] = " "
_lowerCAmelCase : Tuple = " "
data_frame.index += 1
return data_frame
if __name__ == "__main__":
_snake_case = "headphones"
get_amazon_product_data(product).to_csv(f'''Amazon Product Data for {product}.csv''')
| 300
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swinv2"] = [
"SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Swinv2ForImageClassification",
"Swinv2ForMaskedImageModeling",
"Swinv2Model",
"Swinv2PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 300
|
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf( model , ckpt_dir , model_name ):
    '''simple docstring'''
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )
    if not os.path.isdir(ckpt_dir ):
        os.makedirs(ckpt_dir )
    state_dict = model.state_dict()
    def to_tf_var_name(name ):
        for patt, repl in iter(var_map ):
            name = name.replace(patt , repl )
        return F"bert/{name}"
    def create_tf_var(tensor , name , session ):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype )
        tf_var = tf.get_variable(dtype=tf_dtype , shape=tensor.shape , name=name , initializer=tf.zeros_initializer() )
        session.run(tf.variables_initializer([tf_var] ) )
        session.run(tf_var )
        return tf_var
    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name )
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose ):
                # PyTorch linear weights are (out, in); TF kernels are (in, out)
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor , name=tf_name , session=session )
            tf.keras.backend.set_value(tf_var , torch_tensor )
            tf_tensor = session.run(tf_var )
            print(F"Successfully created {tf_name}: {np.allclose(tf_tensor , torch_tensor )}" )
        saver = tf.train.Saver(tf.trainable_variables() )
        saver.save(session , os.path.join(ckpt_dir , model_name.replace("-" , "_" ) + ".ckpt" ) )
def main( raw_args=None ):
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name" , type=str , required=True , help="model name e.g. bert-base-uncased" )
    parser.add_argument(
        "--cache_dir" , type=str , default=None , required=False , help="Directory containing pytorch model" )
    parser.add_argument("--pytorch_model_path" , type=str , required=True , help="/path/to/<pytorch-model-name>.bin" )
    parser.add_argument("--tf_cache_dir" , type=str , required=True , help="Directory in which to save tensorflow model" )
    args = parser.parse_args(raw_args )
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
    convert_pytorch_checkpoint_to_tf(model=model , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
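# Example invocation (argument values assumed, for illustration only):
#   python this_script.py --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin --tf_cache_dir ./tf_ckpt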
| 300
| 1
|
import copy
import random
from transformers import CLIPTokenizer
class UpperCAmelCase_ ( a):
def __init__( self, *__a, **__a):
'''simple docstring'''
super().__init__(*__a, **__a)
_lowerCAmelCase : Union[str, Any] = {}
def snake_case__ ( self, __a, *__a, **__a):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = super().add_tokens(__a, *__a, **__a)
if num_added_tokens == 0:
raise ValueError(
f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
" `placeholder_token` that is not already in the tokenizer.")
def snake_case__ ( self, __a, *__a, __a=1, **__a):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = []
if num_vec_per_token == 1:
self.try_adding_tokens(__a, *__a, **__a)
output.append(__a)
else:
_lowerCAmelCase : Optional[Any] = []
for i in range(__a):
_lowerCAmelCase : Union[str, Any] = placeholder_token + f"_{i}"
self.try_adding_tokens(__a, *__a, **__a)
output.append(__a)
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
f"The tokenizer already has placeholder token {token} that can get confused with"
f" {placeholder_token}keep placeholder tokens independent")
_lowerCAmelCase : List[str] = output
def snake_case__ ( self, __a, __a=False, __a=1.0):
'''simple docstring'''
if isinstance(__a, __a):
_lowerCAmelCase : Dict = []
for i in range(len(__a)):
output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=__a))
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
_lowerCAmelCase : Optional[int] = self.token_map[placeholder_token]
_lowerCAmelCase : Union[str, Any] = tokens[: 1 + int(len(__a) * prop_tokens_to_load)]
if vector_shuffle:
_lowerCAmelCase : int = copy.copy(__a)
random.shuffle(__a)
_lowerCAmelCase : int = text.replace(__a, " ".join(__a))
return text
def __call__( self, __a, *__a, __a=False, __a=1.0, **__a):
'''simple docstring'''
return super().__call__(
self.replace_placeholder_tokens_in_text(
__a, vector_shuffle=__a, prop_tokens_to_load=__a), *__a, **__a, )
def snake_case__ ( self, __a, *__a, __a=False, __a=1.0, **__a):
'''simple docstring'''
return super().encode(
self.replace_placeholder_tokens_in_text(
__a, vector_shuffle=__a, prop_tokens_to_load=__a), *__a, **__a, )
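# Sketch of the intended behaviour (illustrative, names assumed): a
# placeholder such as "<cat>" registered with num_vec_per_token=3 is stored
# as ["<cat>_0", "<cat>_1", "<cat>_2"], and replace_placeholder_tokens_in_text
# expands "<cat>" into that (optionally shuffled) sequence before the normal
# CLIP tokenization runs.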
| 300
|
class UpperCAmelCase_ :
def __init__( self):
'''simple docstring'''
_lowerCAmelCase : Dict = 0
_lowerCAmelCase : Optional[int] = 0
_lowerCAmelCase : Tuple = {}
def snake_case__ ( self, __a):
'''simple docstring'''
if vertex not in self.adjacency:
_lowerCAmelCase : List[Any] = {}
self.num_vertices += 1
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
self.add_vertex(__a)
self.add_vertex(__a)
if head == tail:
return
_lowerCAmelCase : Dict = weight
_lowerCAmelCase : Dict = weight
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.get_edges()
for edge in edges:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Dict = edge
edges.remove((tail, head, weight))
for i in range(len(__a)):
_lowerCAmelCase : Optional[int] = list(edges[i])
edges.sort(key=lambda __a: e[2])
for i in range(len(__a) - 1):
if edges[i][2] >= edges[i + 1][2]:
_lowerCAmelCase : Tuple = edges[i][2] + 1
for edge in edges:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Dict = edge
_lowerCAmelCase : Union[str, Any] = weight
_lowerCAmelCase : Optional[int] = weight
def __str__( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = ""
for tail in self.adjacency:
for head in self.adjacency[tail]:
_lowerCAmelCase : List[Any] = self.adjacency[head][tail]
string += f"{head} -> {tail} == {weight}\n"
return string.rstrip("\n")
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]))
return output
def snake_case__ ( self):
'''simple docstring'''
return self.adjacency.keys()
@staticmethod
def snake_case__ ( __a=None, __a=None):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = Graph()
if vertices is None:
_lowerCAmelCase : Any = []
if edges is None:
_lowerCAmelCase : Any = []
for vertex in vertices:
g.add_vertex(__a)
for edge in edges:
g.add_edge(*__a)
return g
class UpperCAmelCase_ :
def __init__( self):
'''simple docstring'''
_lowerCAmelCase : Dict = {}
_lowerCAmelCase : List[Any] = {}
def __len__( self):
'''simple docstring'''
return len(self.parent)
def snake_case__ ( self, __a):
'''simple docstring'''
if item in self.parent:
return self.find(__a)
_lowerCAmelCase : Optional[int] = item
_lowerCAmelCase : Any = 0
return item
def snake_case__ ( self, __a):
'''simple docstring'''
if item not in self.parent:
return self.make_set(__a)
if item != self.parent[item]:
_lowerCAmelCase : Any = self.find(self.parent[item])
return self.parent[item]
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.find(__a)
_lowerCAmelCase : List[str] = self.find(__a)
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
_lowerCAmelCase : Any = roota
return roota
if self.rank[roota] < self.rank[roota]:
_lowerCAmelCase : List[Any] = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
_lowerCAmelCase : int = roota
return roota
return None
@staticmethod
def snake_case__ ( __a):
'''simple docstring'''
_lowerCAmelCase : Tuple = graph.num_vertices
_lowerCAmelCase : Optional[int] = Graph.UnionFind()
_lowerCAmelCase : str = []
while num_components > 1:
_lowerCAmelCase : List[str] = {}
for vertex in graph.get_vertices():
_lowerCAmelCase : Optional[Any] = -1
_lowerCAmelCase : Union[str, Any] = graph.get_edges()
for edge in edges:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : str = edge
edges.remove((tail, head, weight))
for edge in edges:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = edge
_lowerCAmelCase : Dict = union_find.find(__a)
_lowerCAmelCase : Optional[Any] = union_find.find(__a)
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_lowerCAmelCase : Union[str, Any] = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_lowerCAmelCase : Tuple = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = cheap_edge[vertex]
if union_find.find(__a) != union_find.find(__a):
union_find.union(__a, __a)
mst_edges.append(cheap_edge[vertex])
_lowerCAmelCase : Any = num_components - 1
_lowerCAmelCase : List[str] = Graph.build(edges=__a)
return mst
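# The loop above implements (a variant of) Boruvka's MST algorithm: each
# round every component keeps its cheapest outgoing edge, so the number of
# components at least halves per round and the merge loop runs O(log V) times.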
| 300
| 1
|
import operator as op
def A ( post_fix ):
    '''simple docstring'''
    stack = []
    div = lambda x , y : int(x / y )  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation
    # print table header
    print("Symbol".center(8 ) , "Action".center(12 ) , "Stack" , sep=" | " )
    print("-" * (30 + len(post_fix )) )
    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x )  # append x to stack
            # output in tabular format
            print(x.rjust(8 ) , ("push(" + x + ")").ljust(12 ) , ",".join(stack ) , sep=" | " )
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8 ) , ("pop(" + b + ")").ljust(12 ) , ",".join(stack ) , sep=" | " )
            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8 ) , ("pop(" + a + ")").ljust(12 ) , ",".join(stack ) , sep=" | " )
            stack.append(
                str(opr[x](int(a ) , int(b ) ) ) )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8 ) , ("push(" + a + x + b + ")").ljust(12 ) , ",".join(stack ) , sep=" | " , )
    return int(stack[0] )
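# Worked example: the postfix expression "5 6 9 * +" pushes 5, 6 and 9,
# multiplies 6 * 9 = 54, then adds 5 + 54 = 59.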
if __name__ == "__main__":
_snake_case = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
print("\n\tResult = ", solve(Postfix))
| 300
|
_snake_case = 8.3144598
def A ( temperature , molar_mass ):
'''simple docstring'''
if temperature < 0:
raise Exception("Temperature cannot be less than 0 K" )
if molar_mass <= 0:
raise Exception("Molar mass cannot be less than or equal to 0 kg/mol" )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
    temperature = 300
    molar_mass = 28
    vrms = A(temperature, molar_mass)
    print(f'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
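    # Note (units): with R in J/(mol*K) the formula expects molar mass in
    # kg/mol, so nitrogen would strictly be 0.028; the value 28 above is
    # kept as in the original example.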
| 300
| 1
|
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = Vector([1, 2, 3])
self.assertEqual(x.component(0), 1)
self.assertEqual(x.component(2), 3)
_lowerCAmelCase : int = Vector()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = Vector([0, 0, 0, 0, 0, 1])
self.assertEqual(str(__a), "(0,0,0,0,0,1)")
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = Vector([1, 2, 3, 4])
self.assertEqual(len(__a), 4)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = Vector([1, 2])
_lowerCAmelCase : List[Any] = Vector([1, 2, 3, 4, 5])
_lowerCAmelCase : int = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
_lowerCAmelCase : Optional[int] = Vector([1, -1, 1, -1, 2, -3, 4, -5])
self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
self.assertEqual(z.euclidean_length(), 0)
self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = Vector([1, 2, 3])
_lowerCAmelCase : List[str] = Vector([1, 1, 1])
self.assertEqual((x + y).component(0), 2)
self.assertEqual((x + y).component(1), 3)
self.assertEqual((x + y).component(2), 4)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = Vector([1, 2, 3])
_lowerCAmelCase : str = Vector([1, 1, 1])
self.assertEqual((x - y).component(0), 0)
self.assertEqual((x - y).component(1), 1)
self.assertEqual((x - y).component(2), 2)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = Vector([1, 2, 3])
_lowerCAmelCase : str = Vector([2, -1, 4]) # for test of dot product
_lowerCAmelCase : List[str] = Vector([1, -2, -1])
self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
self.assertEqual((a * b), 0)
def snake_case__ ( self):
'''simple docstring'''
self.assertEqual(str(zero_vector(10)).count("0"), 10)
def snake_case__ ( self):
'''simple docstring'''
self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = Vector([1, 2, 3])
_lowerCAmelCase : int = Vector([1, 0, 1])
self.assertEqual(str(axpy(2, __a, __a)), "(3,4,7)")
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = Vector([1, 0, 0, 0, 0, 0])
_lowerCAmelCase : Optional[int] = x.copy()
self.assertEqual(str(__a), str(__a))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = Vector([1, 0, 0])
x.change_component(0, 0)
x.change_component(1, 1)
self.assertEqual(str(__a), "(0,1,0)")
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(__a))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
_lowerCAmelCase : Any = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height()):
for y in range(a.width()):
self.assertEqual(minors[x][y], a.minor(__a, __a))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
_lowerCAmelCase : Optional[int] = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height()):
for y in range(a.width()):
self.assertEqual(cofactors[x][y], a.cofactor(__a, __a))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
self.assertEqual(-5, a.determinant())
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
_lowerCAmelCase : Dict = Vector([1, 2, 3])
self.assertEqual("(14,32,50)", str(a * x))
self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
a.change_component(0, 2, 5)
self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(__a))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
self.assertEqual(7, a.component(2, 1), 0.01)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
_lowerCAmelCase : Union[str, Any] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
_lowerCAmelCase : List[str] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))
def snake_case__ ( self):
'''simple docstring'''
self.assertEqual(
"|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n", str(square_zero_matrix(5)), )
if __name__ == "__main__":
unittest.main()
| 300
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'wav2vec2'
def __init__( self, __a=32, __a=768, __a=12, __a=12, __a=3072, __a="gelu", __a=0.1, __a=0.1, __a=0.1, __a=0.0, __a=0.0, __a=0.1, __a=0.1, __a=0.02, __a=1E-5, __a="group", __a="gelu", __a=(512, 512, 512, 512, 512, 512, 512), __a=(5, 2, 2, 2, 2, 2, 2), __a=(10, 3, 3, 3, 3, 2, 2), __a=False, __a=128, __a=16, __a=False, __a=True, __a=0.05, __a=10, __a=2, __a=0.0, __a=10, __a=0, __a=320, __a=2, __a=0.1, __a=100, __a=256, __a=256, __a=0.1, __a="sum", __a=False, __a=False, __a=256, __a=(512, 512, 512, 512, 1500), __a=(5, 3, 3, 1, 1), __a=(1, 2, 3, 1, 1), __a=512, __a=0, __a=1, __a=2, __a=False, __a=3, __a=2, __a=3, __a=None, __a=None, **__a, ):
'''simple docstring'''
super().__init__(**__a, pad_token_id=__a, bos_token_id=__a, eos_token_id=__a)
_lowerCAmelCase : str = hidden_size
_lowerCAmelCase : Optional[int] = feat_extract_norm
_lowerCAmelCase : Dict = feat_extract_activation
_lowerCAmelCase : Any = list(__a)
_lowerCAmelCase : List[str] = list(__a)
_lowerCAmelCase : List[Any] = list(__a)
_lowerCAmelCase : List[str] = conv_bias
_lowerCAmelCase : Optional[Any] = num_conv_pos_embeddings
_lowerCAmelCase : Dict = num_conv_pos_embedding_groups
_lowerCAmelCase : Any = len(self.conv_dim)
_lowerCAmelCase : Union[str, Any] = num_hidden_layers
_lowerCAmelCase : int = intermediate_size
_lowerCAmelCase : List[Any] = hidden_act
_lowerCAmelCase : Any = num_attention_heads
_lowerCAmelCase : List[str] = hidden_dropout
_lowerCAmelCase : Tuple = attention_dropout
_lowerCAmelCase : List[Any] = activation_dropout
_lowerCAmelCase : Dict = feat_proj_dropout
_lowerCAmelCase : Optional[int] = final_dropout
_lowerCAmelCase : Dict = layerdrop
_lowerCAmelCase : Tuple = layer_norm_eps
_lowerCAmelCase : Tuple = initializer_range
_lowerCAmelCase : int = vocab_size
_lowerCAmelCase : Tuple = do_stable_layer_norm
_lowerCAmelCase : Any = use_weighted_layer_sum
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
f" `len(config.conv_kernel) = {len(self.conv_kernel)}`.")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowerCAmelCase : Optional[int] = apply_spec_augment
_lowerCAmelCase : int = mask_time_prob
_lowerCAmelCase : str = mask_time_length
_lowerCAmelCase : int = mask_time_min_masks
_lowerCAmelCase : List[Any] = mask_feature_prob
_lowerCAmelCase : List[Any] = mask_feature_length
_lowerCAmelCase : List[Any] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_lowerCAmelCase : int = num_codevectors_per_group
_lowerCAmelCase : List[str] = num_codevector_groups
_lowerCAmelCase : List[Any] = contrastive_logits_temperature
_lowerCAmelCase : int = feat_quantizer_dropout
_lowerCAmelCase : Any = num_negatives
_lowerCAmelCase : Dict = codevector_dim
_lowerCAmelCase : Any = proj_codevector_dim
_lowerCAmelCase : Optional[int] = diversity_loss_weight
# ctc loss
_lowerCAmelCase : Optional[Any] = ctc_loss_reduction
_lowerCAmelCase : str = ctc_zero_infinity
# adapter
_lowerCAmelCase : Optional[Any] = add_adapter
_lowerCAmelCase : Tuple = adapter_kernel_size
_lowerCAmelCase : str = adapter_stride
_lowerCAmelCase : List[Any] = num_adapter_layers
_lowerCAmelCase : str = output_hidden_size or hidden_size
_lowerCAmelCase : List[str] = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_lowerCAmelCase : List[str] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_lowerCAmelCase : int = list(__a)
_lowerCAmelCase : Dict = list(__a)
_lowerCAmelCase : Dict = list(__a)
_lowerCAmelCase : Tuple = xvector_output_dim
@property
def snake_case__ ( self):
'''simple docstring'''
return functools.reduce(operator.mul, self.conv_stride, 1)
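    # Note: this reduces the conv strides by multiplication, e.g. the default
    # (5, 2, 2, 2, 2, 2, 2) -> 320, i.e. the number of input samples consumed
    # per output frame of the feature encoder.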
| 300
| 1
|
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class UpperCAmelCase_ :
@staticmethod
def snake_case__ ( *__a, **__a):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
@require_torch
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", )
_lowerCAmelCase : Any = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
_lowerCAmelCase : Optional[Any] = image_classifier(__a, candidate_labels=["a", "b", "c"])
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(__a), [
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
], )
_lowerCAmelCase : Optional[Any] = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
self.assertEqual(
nested_simplify(__a), [
[
{"score": 0.333, "label": ANY(__a)},
{"score": 0.333, "label": ANY(__a)},
{"score": 0.333, "label": ANY(__a)},
],
[
{"score": 0.333, "label": ANY(__a)},
{"score": 0.333, "label": ANY(__a)},
{"score": 0.333, "label": ANY(__a)},
],
[
{"score": 0.333, "label": ANY(__a)},
{"score": 0.333, "label": ANY(__a)},
{"score": 0.333, "label": ANY(__a)},
],
[
{"score": 0.333, "label": ANY(__a)},
{"score": 0.333, "label": ANY(__a)},
{"score": 0.333, "label": ANY(__a)},
],
[
{"score": 0.333, "label": ANY(__a)},
{"score": 0.333, "label": ANY(__a)},
{"score": 0.333, "label": ANY(__a)},
],
], )
@require_tf
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf")
_lowerCAmelCase : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
_lowerCAmelCase : Union[str, Any] = image_classifier(__a, candidate_labels=["a", "b", "c"])
self.assertEqual(
nested_simplify(__a), [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], )
_lowerCAmelCase : Dict = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
self.assertEqual(
nested_simplify(__a), [
[
{"score": 0.333, "label": ANY(__a)},
{"score": 0.333, "label": ANY(__a)},
{"score": 0.333, "label": ANY(__a)},
],
[
{"score": 0.333, "label": ANY(__a)},
{"score": 0.333, "label": ANY(__a)},
{"score": 0.333, "label": ANY(__a)},
],
[
{"score": 0.333, "label": ANY(__a)},
{"score": 0.333, "label": ANY(__a)},
{"score": 0.333, "label": ANY(__a)},
],
[
{"score": 0.333, "label": ANY(__a)},
{"score": 0.333, "label": ANY(__a)},
{"score": 0.333, "label": ANY(__a)},
],
[
{"score": 0.333, "label": ANY(__a)},
{"score": 0.333, "label": ANY(__a)},
{"score": 0.333, "label": ANY(__a)},
],
], )
@slow
@require_torch
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = pipeline(
task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", )
# This is an image of 2 cats with remotes and no planes
_lowerCAmelCase : str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
_lowerCAmelCase : int = image_classifier(__a, candidate_labels=["cat", "plane", "remote"])
self.assertEqual(
nested_simplify(__a), [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
], )
_lowerCAmelCase : Optional[int] = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
self.assertEqual(
nested_simplify(__a), [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5, )
@slow
@require_tf
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = pipeline(
task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf")
# This is an image of 2 cats with remotes and no planes
_lowerCAmelCase : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
_lowerCAmelCase : List[str] = image_classifier(__a, candidate_labels=["cat", "plane", "remote"])
self.assertEqual(
nested_simplify(__a), [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
], )
_lowerCAmelCase : Optional[int] = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
self.assertEqual(
nested_simplify(__a), [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5, )
| 300
|
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
_snake_case = False
try:
_snake_case = _is_package_available("google.colab")
except ModuleNotFoundError:
pass
@input.register
class UpperCAmelCase_ :
def __init__( self, __a = None, __a = []):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = 0
_lowerCAmelCase : Optional[int] = choices
_lowerCAmelCase : Tuple = prompt
if sys.platform == "win32":
_lowerCAmelCase : Optional[Any] = "*"
else:
_lowerCAmelCase : Dict = "➔ "
def snake_case__ ( self, __a, __a = ""):
'''simple docstring'''
if sys.platform != "win32":
writeColor(self.choices[index], 32, __a)
else:
forceWrite(self.choices[index], __a)
def snake_case__ ( self, __a):
'''simple docstring'''
if index == self.position:
forceWrite(f" {self.arrow_char} ")
self.write_choice(__a)
else:
forceWrite(f" {self.choices[index]}")
reset_cursor()
def snake_case__ ( self, __a, __a = 1):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(__a)
move_cursor(__a, direction.name)
self.print_choice(self.position)
@input.mark(KEYMAP["up"])
def snake_case__ ( self):
'''simple docstring'''
self.move_direction(Direction.UP)
@input.mark(KEYMAP["down"])
def snake_case__ ( self):
'''simple docstring'''
self.move_direction(Direction.DOWN)
@input.mark(KEYMAP["newline"])
def snake_case__ ( self):
'''simple docstring'''
move_cursor(len(self.choices) - self.position, "DOWN")
return self.position
@input.mark(KEYMAP["interrupt"])
def snake_case__ ( self):
'''simple docstring'''
move_cursor(len(self.choices) - self.position, "DOWN")
raise KeyboardInterrupt
    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = int(chr(self.current_selection))
_lowerCAmelCase : List[str] = index - self.position
if index == self.position:
return
if index < len(self.choices):
if self.position > index:
self.move_direction(Direction.UP, -movement)
elif self.position < index:
self.move_direction(Direction.DOWN, __a)
else:
return
else:
return
def snake_case__ ( self, __a = 0):
'''simple docstring'''
if self.prompt:
linebreak()
forceWrite(self.prompt, "\n")
if in_colab:
forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
else:
forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
_lowerCAmelCase : List[Any] = default_choice
for i in range(len(self.choices)):
self.print_choice(__a)
forceWrite("\n")
move_cursor(len(self.choices) - self.position, "UP")
with cursor.hide():
while True:
if in_colab:
try:
_lowerCAmelCase : str = int(builtins.input())
except ValueError:
_lowerCAmelCase : List[Any] = default_choice
else:
_lowerCAmelCase : List[str] = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices) + 1):
move_cursor(1, "UP")
clear_line()
self.write_choice(__a, "\n")
return choice
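    # The run loop above redraws the menu in place: `handle_input` dispatches
    # on the registered keymaps, and once a choice is made the cursor is reset
    # and the rendered lines are cleared before the index is returned.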
| 300
| 1
|
MOD_ADLER = 65_521
def A ( plain_text ):
    '''simple docstring'''
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr )) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
| 300
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_snake_case = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["BeitFeatureExtractor"]
_snake_case = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_beit"] = [
"BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BeitForImageClassification",
"BeitForMaskedImageModeling",
"BeitForSemanticSegmentation",
"BeitModel",
"BeitPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"FlaxBeitForImageClassification",
"FlaxBeitForMaskedImageModeling",
"FlaxBeitModel",
"FlaxBeitPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
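# What the lazy wiring above buys (a minimal standalone analogue of
# _LazyModule, which subclasses ModuleType and overrides __getattr__):
# importing the package stays cheap, and the real import only happens
# when a symbol is first touched.
import importlib
import sys
import types

demo = types.ModuleType("lazy_demo")
demo.__getattr__ = lambda name: getattr(importlib.import_module("math"), name)
sys.modules["lazy_demo"] = demo

import lazy_demo

assert lazy_demo.sqrt(9) == 3.0  # math is imported only at this point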
| 300
| 1
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = tempfile.mkdtemp()
_lowerCAmelCase : List[Any] = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_lowerCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
_lowerCAmelCase : List[str] = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
_lowerCAmelCase : Optional[int] = os.path.join(self.tmpdirname, __a)
with open(self.image_processor_file, "w", encoding="utf-8") as fp:
json.dump(__a, __a)
def snake_case__ ( self, **__a):
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname, **__a)
def snake_case__ ( self, **__a):
'''simple docstring'''
return BertTokenizerFast.from_pretrained(self.tmpdirname, **__a)
def snake_case__ ( self, **__a):
'''simple docstring'''
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **__a)
def snake_case__ ( self):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta)]
_lowerCAmelCase : Union[str, Any] = [Image.fromarray(np.moveaxis(__a, 0, -1)) for x in image_inputs]
return image_inputs
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.get_tokenizer()
_lowerCAmelCase : Dict = self.get_rust_tokenizer()
_lowerCAmelCase : int = self.get_image_processor()
_lowerCAmelCase : List[str] = AlignProcessor(tokenizer=__a, image_processor=__a)
processor_slow.save_pretrained(self.tmpdirname)
_lowerCAmelCase : Union[str, Any] = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=__a)
_lowerCAmelCase : Tuple = AlignProcessor(tokenizer=__a, image_processor=__a)
processor_fast.save_pretrained(self.tmpdirname)
_lowerCAmelCase : str = AlignProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer, __a)
self.assertIsInstance(processor_fast.tokenizer, __a)
self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor, __a)
self.assertIsInstance(processor_fast.image_processor, __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
_lowerCAmelCase : Tuple = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
_lowerCAmelCase : int = self.get_image_processor(do_normalize=__a, padding_value=1.0)
_lowerCAmelCase : str = AlignProcessor.from_pretrained(
self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=__a, padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer, __a)
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor, __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.get_image_processor()
_lowerCAmelCase : Optional[Any] = self.get_tokenizer()
_lowerCAmelCase : int = AlignProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : List[str] = self.prepare_image_inputs()
_lowerCAmelCase : Union[str, Any] = image_processor(__a, return_tensors="np")
_lowerCAmelCase : Optional[Any] = processor(images=__a, return_tensors="np")
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1E-2)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = self.get_image_processor()
_lowerCAmelCase : Union[str, Any] = self.get_tokenizer()
_lowerCAmelCase : Dict = AlignProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : List[str] = "lower newer"
_lowerCAmelCase : List[str] = processor(text=__a)
_lowerCAmelCase : List[str] = tokenizer(__a, padding="max_length", max_length=64)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.get_image_processor()
_lowerCAmelCase : Any = self.get_tokenizer()
_lowerCAmelCase : Union[str, Any] = AlignProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : Dict = "lower newer"
_lowerCAmelCase : Union[str, Any] = self.prepare_image_inputs()
_lowerCAmelCase : Optional[int] = processor(text=__a, images=__a)
self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])
# test if it raises when no input is passed
with pytest.raises(__a):
processor()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.get_image_processor()
_lowerCAmelCase : List[Any] = self.get_tokenizer()
_lowerCAmelCase : Tuple = AlignProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : List[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCAmelCase : List[Any] = processor.batch_decode(__a)
_lowerCAmelCase : Union[str, Any] = tokenizer.batch_decode(__a)
self.assertListEqual(__a, __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.get_image_processor()
_lowerCAmelCase : Optional[int] = self.get_tokenizer()
_lowerCAmelCase : Any = AlignProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : Tuple = "lower newer"
_lowerCAmelCase : Dict = self.prepare_image_inputs()
_lowerCAmelCase : Union[str, Any] = processor(text=__a, images=__a)
self.assertListEqual(list(inputs.keys()), processor.model_input_names)
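# Hedged end-to-end sketch of the processor under test; the checkpoint name
# is the public ALIGN release, and network access is assumed.
import numpy as np
from PIL import Image
from transformers import AlignProcessor

align_processor = AlignProcessor.from_pretrained("kakaobrain/align-base")
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
inputs = align_processor(text=["a photo of a cat"], images=image, return_tensors="pt")
print(sorted(inputs.keys()))  # attention_mask, input_ids, pixel_values, token_type_ids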
| 300
|
from __future__ import annotations
from typing import Any
class UpperCAmelCase_ :
def __init__( self, __a, __a, __a = 0):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : int = row, column
_lowerCAmelCase : str = [[default_value for c in range(__a)] for r in range(__a)]
def __str__( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = f"Matrix consist of {self.row} rows and {self.column} columns\n"
# Make string identifier
_lowerCAmelCase : str = 0
for row_vector in self.array:
for obj in row_vector:
_lowerCAmelCase : List[str] = max(__a, len(str(__a)))
_lowerCAmelCase : Union[str, Any] = f"%{max_element_length}s"
# Make string and return
def single_line(__a) -> str:
nonlocal string_format_identifier
_lowerCAmelCase : Dict = "["
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
line += "]"
return line
s += "\n".join(single_line(__a) for row_vector in self.array)
return s
def __repr__( self):
'''simple docstring'''
return str(self)
def snake_case__ ( self, __a):
'''simple docstring'''
if not (isinstance(__a, (list, tuple)) and len(__a) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self, __a):
'''simple docstring'''
assert self.validate_indicies(__a)
return self.array[loc[0]][loc[1]]
def __setitem__( self, __a, __a):
'''simple docstring'''
assert self.validate_indicies(__a)
_lowerCAmelCase : Union[str, Any] = value
def __add__( self, __a):
'''simple docstring'''
assert isinstance(__a, __a)
assert self.row == another.row and self.column == another.column
# Add
_lowerCAmelCase : Any = Matrix(self.row, self.column)
for r in range(self.row):
for c in range(self.column):
_lowerCAmelCase : Any = self[r, c] + another[r, c]
return result
def __neg__( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = Matrix(self.row, self.column)
for r in range(self.row):
for c in range(self.column):
_lowerCAmelCase : str = -self[r, c]
return result
def __sub__( self, __a):
'''simple docstring'''
return self + (-another)
def __mul__( self, __a):
'''simple docstring'''
if isinstance(__a, (int, float)): # Scalar multiplication
_lowerCAmelCase : Dict = Matrix(self.row, self.column)
for r in range(self.row):
for c in range(self.column):
_lowerCAmelCase : Optional[Any] = self[r, c] * another
return result
elif isinstance(__a, __a): # Matrix multiplication
assert self.column == another.row
_lowerCAmelCase : List[str] = Matrix(self.row, another.column)
for r in range(self.row):
for c in range(another.column):
for i in range(self.column):
result[r, c] += self[r, i] * another[i, c]
return result
else:
_lowerCAmelCase : Optional[Any] = f"Unsupported type given for another ({type(__a)})"
raise TypeError(__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = Matrix(self.column, self.row)
for r in range(self.row):
for c in range(self.column):
_lowerCAmelCase : Any = self[r, c]
return result
def snake_case__ ( self, __a, __a):
'''simple docstring'''
assert isinstance(__a, __a) and isinstance(__a, __a)
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
_lowerCAmelCase : int = v.transpose()
_lowerCAmelCase : str = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertible
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
def A ( ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = Matrix(3 , 3 , 0 )
for i in range(3 ):
_lowerCAmelCase : Union[str, Any] = 1
print(F"a^(-1) is {ainv}" )
# u, v
_lowerCAmelCase : Any = Matrix(3 , 1 , 0 )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = 1, 2, -3
_lowerCAmelCase : List[Any] = Matrix(3 , 1 , 0 )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : str = 4, -2, 5
print(F"u is {u}" )
print(F"v is {v}" )
print(F"uv^T is {u * v.transpose()}" )
# Sherman Morrison
print(F"(a + uv^T)^(-1) is {ainv.sherman_morrison(_lowerCamelCase , _lowerCamelCase )}" )
def A ( ):
'''simple docstring'''
import doctest
doctest.testmod()
testa()
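# Hedged NumPy cross-check of the Sherman-Morrison update implemented above:
# (A + u v^T)^{-1} = A^{-1} - (A^{-1} u)(v^T A^{-1}) / (1 + v^T A^{-1} u),
# where the matrix the method is called on plays the role of A^{-1}.
import numpy as np

ainv = np.eye(3)
u = np.array([[1.0], [2.0], [-3.0]])
v = np.array([[4.0], [-2.0], [5.0]])
denom = 1.0 + (v.T @ ainv @ u)[0, 0]
updated = ainv - (ainv @ u) @ (v.T @ ainv) / denom
assert np.allclose(updated, np.linalg.inv(np.linalg.inv(ainv) + u @ v.T))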
| 300
| 1
|
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def A ( _lowerCamelCase = 8 ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = ascii_letters + digits + punctuation
return "".join(secrets.choice(_lowerCamelCase ) for _ in range(_lowerCamelCase ) )
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
i -= len(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = i // 3
_lowerCAmelCase : List[Any] = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
_lowerCAmelCase : str = (
chars_incl
+ random(_lowerCamelCase , quotient + remainder )
+ random(_lowerCamelCase , _lowerCamelCase )
+ random(_lowerCamelCase , _lowerCamelCase )
)
_lowerCAmelCase : str = list(_lowerCamelCase )
shuffle(_lowerCamelCase )
return "".join(_lowerCamelCase )
# random is a generalised function for letters, characters and numbers
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
return "".join(secrets.choice(_lowerCamelCase ) for _ in range(_lowerCamelCase ) )
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
pass # Put your code here...
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
pass # Put your code here...
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
pass # Put your code here...
def A ( _lowerCamelCase , _lowerCamelCase = 8 ):
'''simple docstring'''
if len(_lowerCamelCase ) < min_length:
# Your Password must be at least 8 characters long
return False
_lowerCAmelCase : Tuple = any(char in ascii_uppercase for char in password )
_lowerCAmelCase : Tuple = any(char in ascii_lowercase for char in password )
_lowerCAmelCase : Optional[Any] = any(char in digits for char in password )
_lowerCAmelCase : Tuple = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase,
# numbers, and special characters
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = int(input("Please indicate the max length of your password: " ).strip() )
_lowerCAmelCase : Tuple = input(
"Please indicate the characters that must be in your password: " ).strip()
print("Password generated:" , password_generator(_lowerCamelCase ) )
print(
"Alternative Password generated:" , alternative_password_generator(_lowerCamelCase , _lowerCamelCase ) , )
print("[If you are thinking of using this passsword, You better save it.]" )
if __name__ == "__main__":
main()
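# A hedged usage sketch of the strength rule checked above, written readably;
# the function name here is illustrative, not from the module itself.
from string import ascii_lowercase, ascii_uppercase, digits, punctuation

def is_strong(password: str, min_length: int = 8) -> bool:
    return (
        len(password) >= min_length
        and any(c in ascii_uppercase for c in password)
        and any(c in ascii_lowercase for c in password)
        and any(c in digits for c in password)
        and any(c in punctuation for c in password)
    )

assert is_strong("Hwea7$2!") is True
assert is_strong("alllowercase") is False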
| 300
|
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class UpperCAmelCase_ ( datasets.BuilderConfig):
lowerCamelCase__ = None
class UpperCAmelCase_ ( datasets.ArrowBasedBuilder):
lowerCamelCase__ = PandasConfig
def snake_case__ ( self):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features)
def snake_case__ ( self, __a):
'''simple docstring'''
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
_lowerCAmelCase : str = dl_manager.download_and_extract(self.config.data_files)
if isinstance(__a, (str, list, tuple)):
_lowerCAmelCase : str = data_files
if isinstance(__a, __a):
_lowerCAmelCase : int = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase : Union[str, Any] = [dl_manager.iter_files(__a) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
_lowerCAmelCase : str = []
for split_name, files in data_files.items():
if isinstance(__a, __a):
_lowerCAmelCase : Optional[Any] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase : str = [dl_manager.iter_files(__a) for file in files]
splits.append(datasets.SplitGenerator(name=__a, gen_kwargs={"files": files}))
return splits
def snake_case__ ( self, __a):
'''simple docstring'''
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
_lowerCAmelCase : str = table_cast(__a, self.config.features.arrow_schema)
return pa_table
def snake_case__ ( self, __a):
'''simple docstring'''
for i, file in enumerate(itertools.chain.from_iterable(__a)):
with open(__a, "rb") as f:
_lowerCAmelCase : Optional[Any] = pa.Table.from_pandas(pd.read_pickle(__a))
yield i, self._cast_table(__a)
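# The per-file hop performed above (pd.read_pickle -> pa.Table.from_pandas),
# in isolation; a sketch with pickling to disk and feature casting elided.
import pandas as pd
import pyarrow as pa

df = pd.DataFrame({"text": ["a", "b"], "label": [0, 1]})
table = pa.Table.from_pandas(df)
assert table.num_rows == 2 and set(table.column_names) >= {"text", "label"}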
| 300
| 1
|
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = logging.get_logger()
# the current default level is logging.WARNING
_lowerCAmelCase : Dict = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())
# restore to the original level
logging.set_verbosity(__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = logging.get_verbosity()
_lowerCAmelCase : Tuple = logging.get_logger("transformers.models.bart.tokenization_bart")
_lowerCAmelCase : int = "Testing 1, 2, 3"
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(__a) as cl:
logger.warning(__a)
self.assertEqual(cl.out, msg + "\n")
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(__a) as cl:
logger.warning(__a)
self.assertEqual(cl.out, "")
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(__a) as cl:
logger.warning(__a)
self.assertEqual(cl.out, msg + "\n")
# restore to the original level
logging.set_verbosity(__a)
@mockenv(TRANSFORMERS_VERBOSITY="error")
def snake_case__ ( self):
'''simple docstring'''
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
_lowerCAmelCase : Optional[int] = logging.get_logger("transformers.models.bart.tokenization_bart")
_lowerCAmelCase : Optional[Any] = os.getenv("TRANSFORMERS_VERBOSITY", __a)
_lowerCAmelCase : List[str] = logging.log_levels[env_level_str]
_lowerCAmelCase : List[Any] = logging.get_verbosity()
self.assertEqual(
__a, __a, f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}", )
# restore to the original level
_lowerCAmelCase : int = ""
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY="super-error")
def snake_case__ ( self):
'''simple docstring'''
transformers.utils.logging._reset_library_root_logger()
_lowerCAmelCase : str = logging.logging.getLogger()
with CaptureLogger(__a) as cl:
# this action activates the env var
logging.get_logger("transformers.models.bart.tokenization_bart")
self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out)
# no need to restore as nothing was changed
def snake_case__ ( self):
'''simple docstring'''
transformers.utils.logging._reset_library_root_logger()
_lowerCAmelCase : List[Any] = logging.get_logger("transformers.models.bart.tokenization_bart")
_lowerCAmelCase : Dict = "Testing 1, 2, 3"
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"):
# nothing should be logged as env var disables this method
with CaptureLogger(__a) as cl:
logger.warning_advice(__a)
self.assertEqual(cl.out, "")
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(__a) as cl:
logger.warning_advice(__a)
self.assertEqual(cl.out, msg + "\n")
def A ( ):
'''simple docstring'''
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
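# How the verbosity knobs exercised above are typically driven in user code
# (a sketch; the public API lives in transformers.utils.logging):
from transformers.utils import logging as hf_logging

hf_logging.set_verbosity_info()
logger = hf_logging.get_logger(__name__)
logger.info("visible at INFO verbosity")
hf_logging.set_verbosity_error()
logger.warning("suppressed at ERROR verbosity")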
| 300
|
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class UpperCAmelCase_ ( a , a , unittest.TestCase):
lowerCamelCase__ = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCamelCase__ = (
{
'feature-extraction': TFMobileBertModel,
'fill-mask': TFMobileBertForMaskedLM,
'question-answering': TFMobileBertForQuestionAnswering,
'text-classification': TFMobileBertForSequenceClassification,
'token-classification': TFMobileBertForTokenClassification,
'zero-shot': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
def snake_case__ ( self, __a, __a, __a=False):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = super()._prepare_for_class(__a, __a, return_labels=__a)
if return_labels:
if model_class in get_values(__a):
_lowerCAmelCase : Tuple = tf.zeros(self.model_tester.batch_size, dtype=tf.intaa)
return inputs_dict
class UpperCAmelCase_ ( a):
def __init__( self, __a, __a=13, __a=7, __a=True, __a=True, __a=True, __a=True, __a=99, __a=32, __a=32, __a=2, __a=4, __a=37, __a="gelu", __a=0.1, __a=0.1, __a=512, __a=16, __a=2, __a=0.02, __a=3, __a=4, __a=None, ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = parent
_lowerCAmelCase : Dict = batch_size
_lowerCAmelCase : str = seq_length
_lowerCAmelCase : int = is_training
_lowerCAmelCase : List[Any] = use_input_mask
_lowerCAmelCase : Optional[Any] = use_token_type_ids
_lowerCAmelCase : Union[str, Any] = use_labels
_lowerCAmelCase : int = vocab_size
_lowerCAmelCase : int = hidden_size
_lowerCAmelCase : Optional[int] = num_hidden_layers
_lowerCAmelCase : Tuple = num_attention_heads
_lowerCAmelCase : Dict = intermediate_size
_lowerCAmelCase : Tuple = hidden_act
_lowerCAmelCase : Any = hidden_dropout_prob
_lowerCAmelCase : Any = attention_probs_dropout_prob
_lowerCAmelCase : List[Any] = max_position_embeddings
_lowerCAmelCase : Any = type_vocab_size
_lowerCAmelCase : List[Any] = type_sequence_label_size
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : List[str] = num_labels
_lowerCAmelCase : List[Any] = num_choices
_lowerCAmelCase : str = scope
_lowerCAmelCase : Union[str, Any] = embedding_size
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
_lowerCAmelCase : str = None
if self.use_input_mask:
_lowerCAmelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length])
_lowerCAmelCase : List[str] = None
if self.use_token_type_ids:
_lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : Optional[int] = None
if self.use_labels:
_lowerCAmelCase : int = ids_tensor([self.batch_size], self.type_sequence_label_size)
_lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
_lowerCAmelCase : str = ids_tensor([self.batch_size], self.num_choices)
_lowerCAmelCase : Optional[Any] = MobileBertConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, embedding_size=self.embedding_size, )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : str = TFMobileBertModel(config=__a)
_lowerCAmelCase : List[str] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : Any = model(__a)
_lowerCAmelCase : Optional[Any] = [input_ids, input_mask]
_lowerCAmelCase : List[Any] = model(__a)
_lowerCAmelCase : Any = model(__a)
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : int = TFMobileBertForMaskedLM(config=__a)
_lowerCAmelCase : List[str] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : List[Any] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : str = TFMobileBertForNextSentencePrediction(config=__a)
_lowerCAmelCase : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : List[str] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = TFMobileBertForPreTraining(config=__a)
_lowerCAmelCase : Any = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : Optional[Any] = model(__a)
self.parent.assertEqual(
result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Dict = self.num_labels
_lowerCAmelCase : Optional[Any] = TFMobileBertForSequenceClassification(config=__a)
_lowerCAmelCase : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : Optional[Any] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.num_choices
_lowerCAmelCase : List[Any] = TFMobileBertForMultipleChoice(config=__a)
_lowerCAmelCase : Dict = tf.tile(tf.expand_dims(__a, 1), (1, self.num_choices, 1))
_lowerCAmelCase : List[str] = tf.tile(tf.expand_dims(__a, 1), (1, self.num_choices, 1))
_lowerCAmelCase : Optional[int] = tf.tile(tf.expand_dims(__a, 1), (1, self.num_choices, 1))
_lowerCAmelCase : Optional[Any] = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
_lowerCAmelCase : List[str] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.num_labels
_lowerCAmelCase : Union[str, Any] = TFMobileBertForTokenClassification(config=__a)
_lowerCAmelCase : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : Union[str, Any] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : int = TFMobileBertForQuestionAnswering(config=__a)
_lowerCAmelCase : Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : Union[str, Any] = model(__a)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
(
(
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) ,
) : Union[str, Any] = config_and_inputs
_lowerCAmelCase : List[str] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = TFMobileBertModelTest.TFMobileBertModelTester(self)
_lowerCAmelCase : List[Any] = ConfigTester(self, config_class=__a, hidden_size=37)
def snake_case__ ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*__a)
@slow
def snake_case__ ( self):
'''simple docstring'''
for model_name in ["google/mobilebert-uncased"]:
_lowerCAmelCase : List[Any] = TFMobileBertModel.from_pretrained(__a)
self.assertIsNotNone(__a)
@require_tf
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
_lowerCAmelCase : Any = tf.constant([[0, 1, 2, 3, 4, 5]])
_lowerCAmelCase : Tuple = model(__a)[0]
_lowerCAmelCase : Union[str, Any] = [1, 6, 3_0522]
self.assertEqual(output.shape, __a)
_lowerCAmelCase : Tuple = tf.constant(
[
[
[-4.5_919_547, -9.248_295, -9.645_256],
[-6.7_306_175, -6.440_284, -6.6_052_837],
[-7.2_743_506, -6.7_847_915, -6.024_673],
]
])
tf.debugging.assert_near(output[:, :3, :3], __a, atol=1E-4)
| 300
| 1
|
class UpperCAmelCase_ :
def __init__( self):
'''simple docstring'''
_lowerCAmelCase : Dict = 0
_lowerCAmelCase : Optional[int] = 0
_lowerCAmelCase : Tuple = {}
def snake_case__ ( self, __a):
'''simple docstring'''
if vertex not in self.adjacency:
_lowerCAmelCase : List[Any] = {}
self.num_vertices += 1
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
self.add_vertex(__a)
self.add_vertex(__a)
if head == tail:
return
_lowerCAmelCase : Dict = weight
_lowerCAmelCase : Dict = weight
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.get_edges()
for edge in edges:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Dict = edge
edges.remove((tail, head, weight))
for i in range(len(__a)):
_lowerCAmelCase : Optional[int] = list(edges[i])
edges.sort(key=lambda __a: e[2])
for i in range(len(__a) - 1):
if edges[i][2] >= edges[i + 1][2]:
_lowerCAmelCase : Tuple = edges[i][2] + 1
for edge in edges:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Dict = edge
_lowerCAmelCase : Union[str, Any] = weight
_lowerCAmelCase : Optional[int] = weight
def __str__( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = ""
for tail in self.adjacency:
for head in self.adjacency[tail]:
_lowerCAmelCase : List[Any] = self.adjacency[head][tail]
string += f"{head} -> {tail} == {weight}\n"
return string.rstrip("\n")
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]))
return output
def snake_case__ ( self):
'''simple docstring'''
return self.adjacency.keys()
@staticmethod
def snake_case__ ( __a=None, __a=None):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = Graph()
if vertices is None:
_lowerCAmelCase : Any = []
if edges is None:
_lowerCAmelCase : Any = []
for vertex in vertices:
g.add_vertex(__a)
for edge in edges:
g.add_edge(*__a)
return g
class UpperCAmelCase_ :
def __init__( self):
'''simple docstring'''
_lowerCAmelCase : Dict = {}
_lowerCAmelCase : List[Any] = {}
def __len__( self):
'''simple docstring'''
return len(self.parent)
def snake_case__ ( self, __a):
'''simple docstring'''
if item in self.parent:
return self.find(__a)
_lowerCAmelCase : Optional[int] = item
_lowerCAmelCase : Any = 0
return item
def snake_case__ ( self, __a):
'''simple docstring'''
if item not in self.parent:
return self.make_set(__a)
if item != self.parent[item]:
_lowerCAmelCase : Any = self.find(self.parent[item])
return self.parent[item]
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.find(__a)
_lowerCAmelCase : List[str] = self.find(__a)
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
_lowerCAmelCase : Any = roota
return roota
if self.rank[roota] < self.rank[roota]:
_lowerCAmelCase : List[Any] = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
_lowerCAmelCase : int = roota
return roota
return None
@staticmethod
def snake_case__ ( __a):
'''simple docstring'''
_lowerCAmelCase : Tuple = graph.num_vertices
_lowerCAmelCase : Optional[int] = Graph.UnionFind()
_lowerCAmelCase : str = []
while num_components > 1:
_lowerCAmelCase : List[str] = {}
for vertex in graph.get_vertices():
_lowerCAmelCase : Optional[Any] = -1
_lowerCAmelCase : Union[str, Any] = graph.get_edges()
for edge in edges:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : str = edge
edges.remove((tail, head, weight))
for edge in edges:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = edge
_lowerCAmelCase : Dict = union_find.find(__a)
_lowerCAmelCase : Optional[Any] = union_find.find(__a)
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_lowerCAmelCase : Union[str, Any] = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_lowerCAmelCase : Tuple = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = cheap_edge[vertex]
if union_find.find(__a) != union_find.find(__a):
union_find.union(__a, __a)
mst_edges.append(cheap_edge[vertex])
_lowerCAmelCase : Any = num_components - 1
_lowerCAmelCase : List[str] = Graph.build(edges=__a)
return mst
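# Hedged usage sketch against the un-mangled names the internal references
# reveal (`Graph.build` and `Graph.UnionFind` appear above); the MST entry
# point is the final static method, named `boruvka_mst` in the implementation
# this mirrors -- treat that name as an assumption. Distinct weights are used
# since Boruvka's algorithm assumes them.
g = Graph.build(vertices=[0, 1, 2, 3], edges=[(0, 1, 1), (1, 2, 2), (2, 3, 3), (3, 0, 4)])
mst = Graph.boruvka_mst(g)
print(mst)  # three edges spanning the four vertices with minimum total weight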
| 300
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
_snake_case = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(a)
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'rag'
lowerCamelCase__ = True
def __init__( self, __a=None, __a=True, __a=None, __a=None, __a=None, __a=None, __a=None, __a=" / ", __a=" // ", __a=5, __a=300, __a=768, __a=8, __a="wiki_dpr", __a="train", __a="compressed", __a=None, __a=None, __a=False, __a=False, __a=0.0, __a=True, __a=False, __a=False, __a=False, __a=True, __a=None, **__a, ):
'''simple docstring'''
super().__init__(
bos_token_id=__a, pad_token_id=__a, eos_token_id=__a, decoder_start_token_id=__a, forced_eos_token_id=__a, is_encoder_decoder=__a, prefix=__a, vocab_size=__a, **__a, )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
_lowerCAmelCase : List[str] = kwargs.pop("question_encoder")
_lowerCAmelCase : Union[str, Any] = question_encoder_config.pop("model_type")
_lowerCAmelCase : int = kwargs.pop("generator")
_lowerCAmelCase : Optional[Any] = decoder_config.pop("model_type")
from ..auto.configuration_auto import AutoConfig
_lowerCAmelCase : int = AutoConfig.for_model(__a, **__a)
_lowerCAmelCase : Tuple = AutoConfig.for_model(__a, **__a)
_lowerCAmelCase : List[Any] = reduce_loss
_lowerCAmelCase : Any = label_smoothing
_lowerCAmelCase : Optional[int] = exclude_bos_score
_lowerCAmelCase : Optional[Any] = do_marginalize
_lowerCAmelCase : Any = title_sep
_lowerCAmelCase : Any = doc_sep
_lowerCAmelCase : Optional[int] = n_docs
_lowerCAmelCase : Optional[Any] = max_combined_length
_lowerCAmelCase : List[str] = dataset
_lowerCAmelCase : List[str] = dataset_split
_lowerCAmelCase : Optional[Any] = index_name
_lowerCAmelCase : Dict = retrieval_vector_size
_lowerCAmelCase : Union[str, Any] = retrieval_batch_size
_lowerCAmelCase : Optional[int] = passages_path
_lowerCAmelCase : Dict = index_path
_lowerCAmelCase : Tuple = use_dummy_dataset
_lowerCAmelCase : Union[str, Any] = output_retrieved
_lowerCAmelCase : str = do_deduplication
_lowerCAmelCase : Union[str, Any] = use_cache
if self.forced_eos_token_id is None:
_lowerCAmelCase : Tuple = getattr(self.generator, "forced_eos_token_id", __a)
@classmethod
def snake_case__ ( cls, __a, __a, **__a):
'''simple docstring'''
return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = copy.deepcopy(self.__dict__)
_lowerCAmelCase : Union[str, Any] = self.question_encoder.to_dict()
_lowerCAmelCase : Any = self.generator.to_dict()
_lowerCAmelCase : Optional[Any] = self.__class__.model_type
return output
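# Hedged composition sketch using the classmethod defined above (exposed
# publicly as `from_question_encoder_generator_configs`); the checkpoint
# names are the usual RAG building blocks and are an assumption here.
from transformers import AutoConfig, RagConfig

question_encoder_config = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
generator_config = AutoConfig.from_pretrained("facebook/bart-large")
rag_config = RagConfig.from_question_encoder_generator_configs(
    question_encoder_config, generator_config, n_docs=5
)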
| 300
| 1
|
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = generate_pascal_triangle(_lowerCamelCase )
for row_idx in range(_lowerCamelCase ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=" " )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=" " )
else:
print(triangle[row_idx][col_idx] , end="" )
print()
def A ( _lowerCamelCase ):
'''simple docstring'''
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise TypeError("The input value of 'num_rows' should be 'int'" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"The input value of 'num_rows' should be greater than or equal to 0" )
_lowerCAmelCase : list[list[int]] = []
for current_row_idx in range(_lowerCamelCase ):
_lowerCAmelCase : Dict = populate_current_row(_lowerCamelCase , _lowerCamelCase )
triangle.append(_lowerCamelCase )
return triangle
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
_lowerCAmelCase , _lowerCAmelCase : Tuple = 1, 1
for current_col_idx in range(1 , _lowerCamelCase ):
calculate_current_element(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
return current_row
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
'''simple docstring'''
_lowerCAmelCase : Tuple = triangle[current_row_idx - 1][current_col_idx - 1]
_lowerCAmelCase : str = triangle[current_row_idx - 1][current_col_idx]
_lowerCAmelCase : List[Any] = above_to_left_elt + above_to_right_elt
def A ( _lowerCamelCase ):
'''simple docstring'''
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise TypeError("The input value of 'num_rows' should be 'int'" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"The input value of 'num_rows' should be greater than or equal to 0" )
_lowerCAmelCase : list[list[int]] = [[1]]
for row_index in range(1 , _lowerCamelCase ):
_lowerCAmelCase : Optional[Any] = [0] + result[-1] + [0]
_lowerCAmelCase : Optional[Any] = row_index + 1
# Calculate the number of distinct elements in a row
_lowerCAmelCase : int = sum(divmod(_lowerCamelCase , 2 ) )
_lowerCAmelCase : List[Any] = [
temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
]
_lowerCAmelCase : List[str] = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
_lowerCAmelCase : Optional[int] = row_first_half + row_second_half
result.append(_lowerCamelCase )
return result
def A ( ):
'''simple docstring'''
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(_lowerCamelCase , _lowerCamelCase ) -> None:
_lowerCAmelCase : List[Any] = F"{func.__name__}({value})"
_lowerCAmelCase : Tuple = timeit(F"__main__.{call}" , setup="import __main__" )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(F"{call:38} -- {timing:.4f} seconds" )
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(_lowerCamelCase , _lowerCamelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
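# Worked check against the un-mangled names the internal calls above reveal
# (generate_pascal_triangle / generate_pascal_triangle_optimized); both
# routines must agree on the first rows of Pascal's triangle.
expected = [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]]
assert generate_pascal_triangle(4) == expected
assert generate_pascal_triangle_optimized(4) == expected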
| 300
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class UpperCAmelCase_ ( a):
@staticmethod
@abstractmethod
def snake_case__ ( __a):
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def snake_case__ ( self):
'''simple docstring'''
raise NotImplementedError()
| 300
| 1
|
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"The `inpainting.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionInpaintPipeline` instead."
)
| 300
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def A ( _lowerCamelCase , _lowerCamelCase=False ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_lowerCAmelCase : str = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
_lowerCAmelCase : int = ""
else:
_lowerCAmelCase : Union[str, Any] = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowerCAmelCase : Dict = state_dict.pop(F"blocks.{i}.attn.qkv.weight" )
_lowerCAmelCase : Any = state_dict.pop(F"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
_lowerCAmelCase : Dict = in_proj_weight[
: config.hidden_size, :
]
_lowerCAmelCase : List[str] = in_proj_bias[: config.hidden_size]
_lowerCAmelCase : Union[str, Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowerCAmelCase : int = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowerCAmelCase : int = in_proj_weight[
-config.hidden_size :, :
]
_lowerCAmelCase : Optional[int] = in_proj_bias[-config.hidden_size :]
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = dct.pop(_lowerCamelCase )
_lowerCAmelCase : Tuple = val
def A ( ):
'''simple docstring'''
_lowerCAmelCase : int = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCAmelCase : List[str] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return im
@torch.no_grad()
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = ViTConfig()
_lowerCAmelCase : str = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
_lowerCAmelCase : str = True
_lowerCAmelCase : List[str] = int(vit_name[-12:-10] )
_lowerCAmelCase : str = int(vit_name[-9:-6] )
else:
_lowerCAmelCase : List[str] = 1_000
_lowerCAmelCase : int = "huggingface/label-files"
_lowerCAmelCase : Dict = "imagenet-1k-id2label.json"
_lowerCAmelCase : Dict = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) )
_lowerCAmelCase : List[str] = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
_lowerCAmelCase : Optional[int] = idalabel
_lowerCAmelCase : Dict = {v: k for k, v in idalabel.items()}
_lowerCAmelCase : str = int(vit_name[-6:-4] )
_lowerCAmelCase : List[str] = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith("tiny" ):
_lowerCAmelCase : str = 192
_lowerCAmelCase : Union[str, Any] = 768
_lowerCAmelCase : str = 12
_lowerCAmelCase : Any = 3
elif vit_name[9:].startswith("small" ):
_lowerCAmelCase : Any = 384
_lowerCAmelCase : Any = 1_536
_lowerCAmelCase : List[str] = 12
_lowerCAmelCase : Tuple = 6
else:
pass
else:
if vit_name[4:].startswith("small" ):
_lowerCAmelCase : Optional[Any] = 768
_lowerCAmelCase : str = 2_304
_lowerCAmelCase : Optional[int] = 8
_lowerCAmelCase : List[str] = 8
elif vit_name[4:].startswith("base" ):
pass
elif vit_name[4:].startswith("large" ):
_lowerCAmelCase : Optional[Any] = 1_024
_lowerCAmelCase : List[str] = 4_096
_lowerCAmelCase : Dict = 24
_lowerCAmelCase : int = 16
elif vit_name[4:].startswith("huge" ):
_lowerCAmelCase : Union[str, Any] = 1_280
_lowerCAmelCase : Optional[int] = 5_120
_lowerCAmelCase : Optional[Any] = 32
_lowerCAmelCase : str = 16
# load original model from timm
_lowerCAmelCase : List[Any] = timm.create_model(_lowerCamelCase , pretrained=_lowerCamelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
_lowerCAmelCase : List[str] = timm_model.state_dict()
if base_model:
remove_classification_head_(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = create_rename_keys(_lowerCamelCase , _lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
read_in_q_k_v(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# load HuggingFace model
if vit_name[-5:] == "in21k":
_lowerCAmelCase : Optional[int] = ViTModel(_lowerCamelCase ).eval()
else:
_lowerCAmelCase : Optional[int] = ViTForImageClassification(_lowerCamelCase ).eval()
model.load_state_dict(_lowerCamelCase )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
_lowerCAmelCase : Tuple = DeiTImageProcessor(size=config.image_size )
else:
_lowerCAmelCase : Dict = ViTImageProcessor(size=config.image_size )
_lowerCAmelCase : Optional[int] = image_processor(images=prepare_img() , return_tensors="pt" )
_lowerCAmelCase : Union[str, Any] = encoding["pixel_values"]
_lowerCAmelCase : List[str] = model(_lowerCamelCase )
if base_model:
_lowerCAmelCase : List[str] = timm_model.forward_features(_lowerCamelCase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(_lowerCamelCase , outputs.pooler_output , atol=1e-3 )
else:
_lowerCAmelCase : Any = timm_model(_lowerCamelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_lowerCamelCase , outputs.logits , atol=1e-3 )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(F"Saving model {vit_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCamelCase )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_patch16_224",
type=str,
help="Name of the ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
_snake_case = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
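# Shape sanity for the fused-qkv split done in read_in_q_k_v above (a
# standalone sketch): timm stores the attention input projection as one
# (3 * hidden, hidden) matrix whose row blocks are query, key and value.
import torch

hidden = 4
w_qkv = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
w_q, w_k, w_v = w_qkv[:hidden], w_qkv[hidden : 2 * hidden], w_qkv[-hidden:]
assert w_q.shape == w_k.shape == w_v.shape == (hidden, hidden)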
| 300
| 1
|
from collections.abc import Generator
def A ( ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : int = 0, 1
while True:
_lowerCAmelCase , _lowerCAmelCase : List[str] = b, a + b
yield b
def A ( _lowerCamelCase = 1_000 ):
'''simple docstring'''
_lowerCAmelCase : Dict = 1
_lowerCAmelCase : Any = fibonacci_generator()
while len(str(next(_lowerCamelCase ) ) ) < n:
answer += 1
return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
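# A readable equivalent of the solution above (a sketch): find the index of
# the first Fibonacci number with n digits. Project Euler 25 asks for
# n = 1000; n = 3 gives 12, since F(12) = 144 is the first three-digit term.
def first_fib_index_with_n_digits(n: int) -> int:
    a, b, index = 1, 1, 2
    while len(str(b)) < n:
        a, b = b, a + b
        index += 1
    return index

assert first_fib_index_with_n_digits(3) == 12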
| 300
|
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( a):
def __init__( self, *__a, **__a):
'''simple docstring'''
warnings.warn(
"The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use VideoMAEImageProcessor instead.", __a, )
super().__init__(*__a, **__a)
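# Migration sketch per the deprecation warning above (the checkpoint name is
# illustrative):
from transformers import VideoMAEImageProcessor

image_processor = VideoMAEImageProcessor.from_pretrained("MCG-NJU/videomae-base")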
| 300
| 1
|
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class UpperCAmelCase_ ( a):
def __init__( self, __a=0.01, __a=1000):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = p_stop
_lowerCAmelCase : Any = max_length
def __iter__( self):
'''simple docstring'''
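# yield consecutive integers until a random stop is drawn (probability p_stop per step) or max_length is reached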
_lowerCAmelCase : Dict = 0
_lowerCAmelCase : int = False
while not stop and count < self.max_length:
yield count
count += 1
_lowerCAmelCase : Tuple = random.random() < self.p_stop
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self, __a, __a, __a=False, __a=True):
'''simple docstring'''
_lowerCAmelCase : List[str] = [
BatchSamplerShard(__a, 2, __a, split_batches=__a, even_batches=__a)
for i in range(2)
]
_lowerCAmelCase : List[Any] = [list(__a) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(__a) for shard in batch_sampler_shards], [len(__a) for e in expected])
self.assertListEqual(__a, __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = BatchSampler(range(24), batch_size=3, drop_last=__a)
_lowerCAmelCase : Union[str, Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(__a, __a)
_lowerCAmelCase : Tuple = BatchSampler(range(24), batch_size=3, drop_last=__a)
# Expected shouldn't change
self.check_batch_sampler_shards(__a, __a)
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_lowerCAmelCase : int = BatchSampler(range(21), batch_size=3, drop_last=__a)
_lowerCAmelCase : List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(__a, __a)
_lowerCAmelCase : Dict = BatchSampler(range(21), batch_size=3, drop_last=__a)
_lowerCAmelCase : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__a, __a)
        # Check the shards when the dataset is not a round multiple of batch size but the number of
        # batches is a multiple of num_processes.
_lowerCAmelCase : List[str] = BatchSampler(range(22), batch_size=3, drop_last=__a)
_lowerCAmelCase : List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(__a, __a)
_lowerCAmelCase : Union[str, Any] = BatchSampler(range(22), batch_size=3, drop_last=__a)
_lowerCAmelCase : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__a, __a)
        # Check the shards when the dataset is not a round multiple of batch size and the number of
        # batches is not a multiple of num_processes.
_lowerCAmelCase : Optional[int] = BatchSampler(range(20), batch_size=3, drop_last=__a)
_lowerCAmelCase : int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(__a, __a)
_lowerCAmelCase : Union[str, Any] = BatchSampler(range(20), batch_size=3, drop_last=__a)
_lowerCAmelCase : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__a, __a)
# Check the shards when the dataset is very small.
_lowerCAmelCase : Any = BatchSampler(range(2), batch_size=3, drop_last=__a)
_lowerCAmelCase : Optional[Any] = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(__a, __a)
_lowerCAmelCase : Tuple = BatchSampler(range(2), batch_size=3, drop_last=__a)
_lowerCAmelCase : List[Any] = [[], []]
self.check_batch_sampler_shards(__a, __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = BatchSampler(range(24), batch_size=4, drop_last=__a)
_lowerCAmelCase : Optional[int] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(__a, __a, split_batches=__a)
_lowerCAmelCase : int = BatchSampler(range(24), batch_size=4, drop_last=__a)
# Expected shouldn't change
self.check_batch_sampler_shards(__a, __a, split_batches=__a)
# Check the shards when the dataset is not a round multiple of batch size.
_lowerCAmelCase : Dict = BatchSampler(range(22), batch_size=4, drop_last=__a)
_lowerCAmelCase : Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(__a, __a, split_batches=__a)
_lowerCAmelCase : List[Any] = BatchSampler(range(22), batch_size=4, drop_last=__a)
_lowerCAmelCase : Tuple = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__a, __a, split_batches=__a)
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_lowerCAmelCase : Optional[Any] = BatchSampler(range(21), batch_size=4, drop_last=__a)
_lowerCAmelCase : Dict = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(__a, __a, split_batches=__a)
_lowerCAmelCase : Union[str, Any] = BatchSampler(range(21), batch_size=4, drop_last=__a)
_lowerCAmelCase : List[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__a, __a, split_batches=__a)
# Check the shards when the dataset is very small.
_lowerCAmelCase : Tuple = BatchSampler(range(2), batch_size=4, drop_last=__a)
_lowerCAmelCase : List[str] = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(__a, __a, split_batches=__a)
_lowerCAmelCase : Any = BatchSampler(range(2), batch_size=4, drop_last=__a)
_lowerCAmelCase : Union[str, Any] = [[], []]
self.check_batch_sampler_shards(__a, __a, split_batches=__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = BatchSampler(range(24), batch_size=3, drop_last=__a)
_lowerCAmelCase : Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(__a, __a, even_batches=__a)
_lowerCAmelCase : Union[str, Any] = BatchSampler(range(24), batch_size=3, drop_last=__a)
# Expected shouldn't change
self.check_batch_sampler_shards(__a, __a, even_batches=__a)
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_lowerCAmelCase : Optional[int] = BatchSampler(range(21), batch_size=3, drop_last=__a)
_lowerCAmelCase : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__a, __a, even_batches=__a)
_lowerCAmelCase : Any = BatchSampler(range(21), batch_size=3, drop_last=__a)
_lowerCAmelCase : List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__a, __a, even_batches=__a)
        # Check the shards when the dataset is not a round multiple of batch size but the number of
        # batches is a multiple of num_processes.
_lowerCAmelCase : Optional[Any] = BatchSampler(range(22), batch_size=3, drop_last=__a)
_lowerCAmelCase : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(__a, __a, even_batches=__a)
_lowerCAmelCase : Optional[Any] = BatchSampler(range(22), batch_size=3, drop_last=__a)
_lowerCAmelCase : List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__a, __a, even_batches=__a)
        # Check the shards when the dataset is not a round multiple of batch size and the number of
        # batches is not a multiple of num_processes.
_lowerCAmelCase : List[str] = BatchSampler(range(20), batch_size=3, drop_last=__a)
_lowerCAmelCase : int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__a, __a, even_batches=__a)
_lowerCAmelCase : Any = BatchSampler(range(20), batch_size=3, drop_last=__a)
_lowerCAmelCase : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__a, __a, even_batches=__a)
# Check the shards when the dataset is very small.
_lowerCAmelCase : List[str] = BatchSampler(range(2), batch_size=3, drop_last=__a)
_lowerCAmelCase : List[str] = [[[0, 1]], []]
self.check_batch_sampler_shards(__a, __a, even_batches=__a)
_lowerCAmelCase : Dict = BatchSampler(range(2), batch_size=3, drop_last=__a)
_lowerCAmelCase : str = [[], []]
self.check_batch_sampler_shards(__a, __a, even_batches=__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = BatchSampler(range(24), batch_size=4, drop_last=__a)
_lowerCAmelCase : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(__a, __a, split_batches=__a, even_batches=__a)
_lowerCAmelCase : List[str] = BatchSampler(range(24), batch_size=4, drop_last=__a)
# Expected shouldn't change
self.check_batch_sampler_shards(__a, __a, split_batches=__a, even_batches=__a)
# Check the shards when the dataset is not a round multiple of batch size.
_lowerCAmelCase : Union[str, Any] = BatchSampler(range(22), batch_size=4, drop_last=__a)
_lowerCAmelCase : int = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__a, __a, split_batches=__a, even_batches=__a)
_lowerCAmelCase : Any = BatchSampler(range(22), batch_size=4, drop_last=__a)
_lowerCAmelCase : int = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__a, __a, split_batches=__a, even_batches=__a)
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_lowerCAmelCase : Optional[int] = BatchSampler(range(21), batch_size=4, drop_last=__a)
_lowerCAmelCase : Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__a, __a, split_batches=__a, even_batches=__a)
_lowerCAmelCase : Dict = BatchSampler(range(21), batch_size=4, drop_last=__a)
_lowerCAmelCase : List[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__a, __a, split_batches=__a, even_batches=__a)
# Check the shards when the dataset is very small.
_lowerCAmelCase : Optional[Any] = BatchSampler(range(2), batch_size=4, drop_last=__a)
_lowerCAmelCase : Union[str, Any] = [[[0, 1]], []]
self.check_batch_sampler_shards(__a, __a, split_batches=__a, even_batches=__a)
_lowerCAmelCase : Tuple = BatchSampler(range(2), batch_size=4, drop_last=__a)
_lowerCAmelCase : Dict = [[], []]
self.check_batch_sampler_shards(__a, __a, split_batches=__a, even_batches=__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
_lowerCAmelCase : Any = [BatchSamplerShard(__a, 2, __a, even_batches=__a) for i in range(2)]
self.assertEqual(len(batch_sampler_shards[0]), 3)
self.assertEqual(len(batch_sampler_shards[1]), 2)
self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
def snake_case__ ( self, __a, __a, __a, __a=False, __a=2, __a=False):
'''simple docstring'''
random.seed(__a)
_lowerCAmelCase : Tuple = list(__a)
_lowerCAmelCase : Tuple = [
IterableDatasetShard(
__a, batch_size=__a, drop_last=__a, num_processes=__a, process_index=__a, split_batches=__a, )
for i in range(__a)
]
_lowerCAmelCase : Optional[Any] = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(__a)
iterable_dataset_lists.append(list(__a))
_lowerCAmelCase : Tuple = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shard should have the same length, a round multiple of shard_batch_size
_lowerCAmelCase : Optional[Any] = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(__a), len(__a))
self.assertTrue(len(__a) % shard_batch_size == 0)
_lowerCAmelCase : Union[str, Any] = []
for idx in range(0, len(__a), __a):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(__a) < len(__a):
reference += reference
self.assertListEqual(__a, reference[: len(__a)])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = 42
_lowerCAmelCase : Any = RandomIterableDataset()
self.check_iterable_dataset_shards(__a, __a, batch_size=4, drop_last=__a, split_batches=__a)
self.check_iterable_dataset_shards(__a, __a, batch_size=4, drop_last=__a, split_batches=__a)
self.check_iterable_dataset_shards(__a, __a, batch_size=4, drop_last=__a, split_batches=__a)
self.check_iterable_dataset_shards(__a, __a, batch_size=4, drop_last=__a, split_batches=__a)
# Edge case with a very small dataset
_lowerCAmelCase : Optional[int] = RandomIterableDataset(max_length=2)
self.check_iterable_dataset_shards(__a, __a, batch_size=4, drop_last=__a, split_batches=__a)
self.check_iterable_dataset_shards(__a, __a, batch_size=4, drop_last=__a, split_batches=__a)
self.check_iterable_dataset_shards(__a, __a, batch_size=4, drop_last=__a, split_batches=__a)
self.check_iterable_dataset_shards(__a, __a, batch_size=4, drop_last=__a, split_batches=__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = BatchSampler(range(16), batch_size=4, drop_last=__a)
_lowerCAmelCase : str = SkipBatchSampler(__a, 2)
self.assertListEqual(list(__a), [[8, 9, 10, 11], [12, 13, 14, 15]])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = DataLoader(list(range(16)), batch_size=4)
_lowerCAmelCase : Tuple = skip_first_batches(__a, num_batches=2)
self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = DataLoaderShard(list(range(16)), batch_size=4)
for idx, _ in enumerate(__a):
self.assertEqual(dataloader.end_of_dataloader, idx == 3)
# Test it also works on the second iteration
for idx, _ in enumerate(__a):
self.assertEqual(dataloader.end_of_dataloader, idx == 3)
def snake_case__ ( self):
'''simple docstring'''
Accelerator()
_lowerCAmelCase : Dict = DataLoaderDispatcher(range(16), batch_size=4)
for idx, _ in enumerate(__a):
self.assertEqual(dataloader.end_of_dataloader, idx == 3)
# Test it also works on the second iteration
for idx, _ in enumerate(__a):
self.assertEqual(dataloader.end_of_dataloader, idx == 3)
| 300
|
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase):
@property
def snake_case__ ( self):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = ort.SessionOptions()
_lowerCAmelCase : int = False
return options
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png")
_lowerCAmelCase : List[str] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png")
_lowerCAmelCase : List[str] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy")
# using the PNDM scheduler by default
_lowerCAmelCase : Optional[int] = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
"CompVis/stable-diffusion-v1-4", revision="onnx", safety_checker=__a, feature_extractor=__a, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=__a)
_lowerCAmelCase : Any = "A red cat sitting on a park bench"
_lowerCAmelCase : Optional[Any] = np.random.RandomState(0)
_lowerCAmelCase : Any = pipe(
prompt=__a, image=__a, mask_image=__a, strength=0.75, guidance_scale=7.5, num_inference_steps=15, generator=__a, output_type="np", )
_lowerCAmelCase : Optional[int] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 1E-2
| 300
| 1
|
import math
def A ( _lowerCamelCase ):
'''simple docstring'''
    return math.isqrt(_lowerCamelCase ) ** 2 == num  # integer sqrt avoids float round-off for large inputs
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = 0
_lowerCAmelCase : Dict = n
while left <= right:
_lowerCAmelCase : str = (left + right) // 2
if mid**2 == n:
return True
elif mid**2 > n:
_lowerCAmelCase : Any = mid - 1
else:
_lowerCAmelCase : Any = mid + 1
return False
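# both helpers above should agree, e.g. returning True for 16 and False for 15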
if __name__ == "__main__":
import doctest
doctest.testmod()
| 300
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = tempfile.mkdtemp()
# fmt: off
_lowerCAmelCase : Optional[Any] = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
_lowerCAmelCase : Optional[Any] = dict(zip(__a, range(len(__a))))
_lowerCAmelCase : int = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
_lowerCAmelCase : Optional[Any] = {"unk_token": "<unk>"}
_lowerCAmelCase : Any = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
_lowerCAmelCase : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file, "w", encoding="utf-8") as fp:
fp.write(json.dumps(__a) + "\n")
with open(self.merges_file, "w", encoding="utf-8") as fp:
fp.write("\n".join(__a))
_lowerCAmelCase : List[str] = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
_lowerCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname, __a)
with open(self.image_processor_file, "w", encoding="utf-8") as fp:
json.dump(__a, __a)
def snake_case__ ( self, **__a):
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname, **__a)
def snake_case__ ( self, **__a):
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **__a)
def snake_case__ ( self, **__a):
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname, **__a)
def snake_case__ ( self):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def snake_case__ ( self):
'''simple docstring'''
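# build a small batch of random RGB PIL images to exercise the processors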
        _lowerCAmelCase : Tuple = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
_lowerCAmelCase : Optional[int] = [Image.fromarray(np.moveaxis(__a, 0, -1)) for x in image_inputs]
return image_inputs
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.get_tokenizer()
_lowerCAmelCase : Optional[int] = self.get_rust_tokenizer()
_lowerCAmelCase : Dict = self.get_image_processor()
_lowerCAmelCase : Any = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
processor_slow.save_pretrained(self.tmpdirname)
_lowerCAmelCase : Tuple = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=__a)
_lowerCAmelCase : str = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
processor_fast.save_pretrained(self.tmpdirname)
_lowerCAmelCase : Any = CLIPSegProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer, __a)
self.assertIsInstance(processor_fast.tokenizer, __a)
self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor, __a)
self.assertIsInstance(processor_fast.image_processor, __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
_lowerCAmelCase : Any = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
_lowerCAmelCase : Tuple = self.get_image_processor(do_normalize=__a, padding_value=1.0)
_lowerCAmelCase : Union[str, Any] = CLIPSegProcessor.from_pretrained(
self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=__a, padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer, __a)
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor, __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.get_image_processor()
_lowerCAmelCase : Dict = self.get_tokenizer()
_lowerCAmelCase : Union[str, Any] = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : List[str] = self.prepare_image_inputs()
_lowerCAmelCase : List[str] = image_processor(__a, return_tensors="np")
_lowerCAmelCase : Optional[Any] = processor(images=__a, return_tensors="np")
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.get_image_processor()
_lowerCAmelCase : Tuple = self.get_tokenizer()
_lowerCAmelCase : Dict = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : Union[str, Any] = "lower newer"
_lowerCAmelCase : List[str] = processor(text=__a)
_lowerCAmelCase : List[Any] = tokenizer(__a)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.get_image_processor()
_lowerCAmelCase : Any = self.get_tokenizer()
_lowerCAmelCase : Dict = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : int = "lower newer"
_lowerCAmelCase : List[Any] = self.prepare_image_inputs()
_lowerCAmelCase : Any = processor(text=__a, images=__a)
self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])
# test if it raises when no input is passed
with pytest.raises(__a):
processor()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.get_image_processor()
_lowerCAmelCase : int = self.get_tokenizer()
_lowerCAmelCase : Any = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : Dict = self.prepare_image_inputs()
_lowerCAmelCase : Optional[Any] = self.prepare_image_inputs()
_lowerCAmelCase : Any = processor(images=__a, visual_prompt=__a)
self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])
# test if it raises when no input is passed
with pytest.raises(__a):
processor()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = self.get_image_processor()
_lowerCAmelCase : Any = self.get_tokenizer()
_lowerCAmelCase : Any = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCAmelCase : List[str] = processor.batch_decode(__a)
_lowerCAmelCase : List[Any] = tokenizer.batch_decode(__a)
self.assertListEqual(__a, __a)
| 300
| 1
|
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that `from diffusers.pipelines import DiffusionPipeline` temporarily keeps working
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"stable diffusion controlnet",
"0.22.0",
"Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.",
standard_warn=False,
stacklevel=3,
)
| 300
|
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_snake_case = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class UpperCAmelCase_ ( unittest.TestCase):
lowerCamelCase__ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
lowerCamelCase__ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
lowerCamelCase__ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
lowerCamelCase__ = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : str = ZeroShotClassificationPipeline(
model=__a, tokenizer=__a, candidate_labels=["polics", "health"])
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[Any] = classifier("Who are you voting for in 2020?", candidate_labels="politics")
self.assertEqual(__a, {"sequence": ANY(__a), "labels": [ANY(__a)], "scores": [ANY(__a)]})
# No kwarg
_lowerCAmelCase : int = classifier("Who are you voting for in 2020?", ["politics"])
self.assertEqual(__a, {"sequence": ANY(__a), "labels": [ANY(__a)], "scores": [ANY(__a)]})
_lowerCAmelCase : Tuple = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
self.assertEqual(__a, {"sequence": ANY(__a), "labels": [ANY(__a)], "scores": [ANY(__a)]})
_lowerCAmelCase : List[Any] = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
self.assertEqual(
__a, {"sequence": ANY(__a), "labels": [ANY(__a), ANY(__a)], "scores": [ANY(__a), ANY(__a)]})
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)
_lowerCAmelCase : List[str] = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
self.assertEqual(
__a, {"sequence": ANY(__a), "labels": [ANY(__a), ANY(__a)], "scores": [ANY(__a), ANY(__a)]})
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)
_lowerCAmelCase : List[Any] = classifier(
"Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}")
self.assertEqual(__a, {"sequence": ANY(__a), "labels": [ANY(__a)], "scores": [ANY(__a)]})
# https://github.com/huggingface/transformers/issues/13846
_lowerCAmelCase : Optional[int] = classifier(["I am happy"], ["positive", "negative"])
self.assertEqual(
__a, [
{"sequence": ANY(__a), "labels": [ANY(__a), ANY(__a)], "scores": [ANY(__a), ANY(__a)]}
for i in range(1)
], )
_lowerCAmelCase : Any = classifier(["I am happy", "I am sad"], ["positive", "negative"])
self.assertEqual(
__a, [
{"sequence": ANY(__a), "labels": [ANY(__a), ANY(__a)], "scores": [ANY(__a), ANY(__a)]}
for i in range(2)
], )
with self.assertRaises(__a):
classifier("", candidate_labels="politics")
with self.assertRaises(__a):
classifier(__a, candidate_labels="politics")
with self.assertRaises(__a):
classifier("Who are you voting for in 2020?", candidate_labels="")
with self.assertRaises(__a):
classifier("Who are you voting for in 2020?", candidate_labels=__a)
with self.assertRaises(__a):
classifier(
"Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="Not formatting template", )
with self.assertRaises(__a):
classifier(
"Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template=__a, )
self.run_entailment_id(__a)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Tuple = zero_shot_classifier.model.config
        _lowerCAmelCase : Optional[Any] = config.label2id
_lowerCAmelCase : Union[str, Any] = zero_shot_classifier.entailment_id
_lowerCAmelCase : Any = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
self.assertEqual(zero_shot_classifier.entailment_id, -1)
_lowerCAmelCase : Optional[int] = {"entailment": 0, "neutral": 1, "contradiction": 2}
self.assertEqual(zero_shot_classifier.entailment_id, 0)
_lowerCAmelCase : Optional[int] = {"ENTAIL": 0, "NON-ENTAIL": 1}
self.assertEqual(zero_shot_classifier.entailment_id, 0)
_lowerCAmelCase : Optional[Any] = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
self.assertEqual(zero_shot_classifier.entailment_id, 2)
        _lowerCAmelCase : List[str] = original_label2id
self.assertEqual(__a, zero_shot_classifier.entailment_id)
@require_torch
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = pipeline(
"zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt", )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"Who are you voting for in 2020?" * 100, candidate_labels=["politics", "public health", "science"])
@require_torch
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = pipeline(
"zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt", )
_lowerCAmelCase : List[Any] = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
self.assertEqual(
nested_simplify(__a), {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.333, 0.333, 0.333],
}, )
@require_tf
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = pipeline(
"zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="tf", )
_lowerCAmelCase : Union[str, Any] = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
self.assertEqual(
nested_simplify(__a), {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.333, 0.333, 0.333],
}, )
@slow
@require_torch
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
_lowerCAmelCase : Optional[Any] = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
self.assertEqual(
nested_simplify(__a), {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.976, 0.015, 0.009],
}, )
_lowerCAmelCase : Union[str, Any] = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data.", candidate_labels=["machine learning", "statistics", "translation", "vision"], multi_label=__a, )
self.assertEqual(
nested_simplify(__a), {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.817, 0.713, 0.018, 0.018],
}, )
@slow
@require_tf
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
_lowerCAmelCase : Dict = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
self.assertEqual(
nested_simplify(__a), {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.976, 0.015, 0.009],
}, )
_lowerCAmelCase : str = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data.", candidate_labels=["machine learning", "statistics", "translation", "vision"], multi_label=__a, )
self.assertEqual(
nested_simplify(__a), {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.817, 0.713, 0.018, 0.018],
}, )
| 300
| 1
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 42
lowerCamelCase__ = 42
class UpperCAmelCase_ ( a , a):
lowerCamelCase__ = 1
@register_to_config
def __init__( self, __a = 2000, __a = 0.15, __a = 0.01, __a = 1_348.0, __a = 1E-5, __a = 1, ):
'''simple docstring'''
_lowerCAmelCase : Dict = sigma_max
# setable values
_lowerCAmelCase : str = None
self.set_sigmas(__a, __a, __a, __a)
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
return sample
def snake_case__ ( self, __a, __a = None, __a = None):
'''simple docstring'''
_lowerCAmelCase : int = sampling_eps if sampling_eps is not None else self.config.sampling_eps
_lowerCAmelCase : Dict = torch.linspace(1, __a, __a, device=__a)
def snake_case__ ( self, __a, __a = None, __a = None, __a = None):
'''simple docstring'''
_lowerCAmelCase : List[str] = sigma_min if sigma_min is not None else self.config.sigma_min
_lowerCAmelCase : Tuple = sigma_max if sigma_max is not None else self.config.sigma_max
_lowerCAmelCase : str = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(__a, __a)
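# the sigma schedules below are geometric interpolations between sigma_min and sigma_max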
_lowerCAmelCase : int = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
_lowerCAmelCase : Any = torch.exp(torch.linspace(math.log(__a), math.log(__a), __a))
_lowerCAmelCase : int = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])
def snake_case__ ( self, __a, __a):
'''simple docstring'''
return torch.where(
timesteps == 0, torch.zeros_like(t.to(timesteps.device)), self.discrete_sigmas[timesteps - 1].to(timesteps.device), )
def snake_case__ ( self, __a, __a, __a, __a = None, __a = True, ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
_lowerCAmelCase : Dict = timestep * torch.ones(
sample.shape[0], device=sample.device) # torch.repeat_interleave(timestep, sample.shape[0])
_lowerCAmelCase : Dict = (timestep * (len(self.timesteps) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
_lowerCAmelCase : Union[str, Any] = timesteps.to(self.discrete_sigmas.device)
_lowerCAmelCase : Any = self.discrete_sigmas[timesteps].to(sample.device)
_lowerCAmelCase : List[Any] = self.get_adjacent_sigma(__a, __a).to(sample.device)
_lowerCAmelCase : List[str] = torch.zeros_like(__a)
_lowerCAmelCase : Union[str, Any] = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
_lowerCAmelCase : Union[str, Any] = diffusion.flatten()
while len(diffusion.shape) < len(sample.shape):
_lowerCAmelCase : Optional[int] = diffusion.unsqueeze(-1)
_lowerCAmelCase : Dict = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of the SDE
_lowerCAmelCase : Optional[Any] = randn_tensor(
sample.shape, layout=sample.layout, generator=__a, device=sample.device, dtype=sample.dtype)
_lowerCAmelCase : int = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
_lowerCAmelCase : Tuple = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=__a, prev_sample_mean=__a)
def snake_case__ ( self, __a, __a, __a = None, __a = True, ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
        # For small batch sizes, the paper suggests replacing norm(z) with sqrt(d), where d is the dim. of z
# sample noise for correction
_lowerCAmelCase : Union[str, Any] = randn_tensor(sample.shape, layout=sample.layout, generator=__a).to(sample.device)
# compute step size from the model_output, the noise, and the snr
_lowerCAmelCase : Any = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
_lowerCAmelCase : Dict = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
_lowerCAmelCase : Optional[Any] = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
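# this appears to match the annealed Langevin corrector step size, 2 * (snr * ||z|| / ||score||) ** 2, from the score-SDE paper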
_lowerCAmelCase : Dict = step_size * torch.ones(sample.shape[0]).to(sample.device)
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
_lowerCAmelCase : List[Any] = step_size.flatten()
while len(step_size.shape) < len(sample.shape):
_lowerCAmelCase : int = step_size.unsqueeze(-1)
_lowerCAmelCase : List[Any] = sample + step_size * model_output
_lowerCAmelCase : Tuple = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__a)
def snake_case__ ( self, __a, __a, __a, ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = timesteps.to(original_samples.device)
_lowerCAmelCase : Union[str, Any] = self.discrete_sigmas.to(original_samples.device)[timesteps]
_lowerCAmelCase : Any = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(__a) * sigmas[:, None, None, None]
)
_lowerCAmelCase : int = noise + original_samples
return noisy_samples
def __len__( self):
'''simple docstring'''
return self.config.num_train_timesteps
| 300
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 42
lowerCamelCase__ = 42
class UpperCAmelCase_ ( a , a):
lowerCamelCase__ = 1
@register_to_config
def __init__( self, __a = 2000, __a = 0.15, __a = 0.01, __a = 1_348.0, __a = 1E-5, __a = 1, ):
'''simple docstring'''
_lowerCAmelCase : Dict = sigma_max
# setable values
_lowerCAmelCase : str = None
self.set_sigmas(__a, __a, __a, __a)
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
return sample
def snake_case__ ( self, __a, __a = None, __a = None):
'''simple docstring'''
_lowerCAmelCase : int = sampling_eps if sampling_eps is not None else self.config.sampling_eps
_lowerCAmelCase : Dict = torch.linspace(1, __a, __a, device=__a)
def snake_case__ ( self, __a, __a = None, __a = None, __a = None):
'''simple docstring'''
_lowerCAmelCase : List[str] = sigma_min if sigma_min is not None else self.config.sigma_min
_lowerCAmelCase : Tuple = sigma_max if sigma_max is not None else self.config.sigma_max
_lowerCAmelCase : str = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(__a, __a)
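# the sigma schedules below are geometric interpolations between sigma_min and sigma_max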
_lowerCAmelCase : int = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
_lowerCAmelCase : Any = torch.exp(torch.linspace(math.log(__a), math.log(__a), __a))
_lowerCAmelCase : int = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])
def snake_case__ ( self, __a, __a):
'''simple docstring'''
return torch.where(
timesteps == 0, torch.zeros_like(t.to(timesteps.device)), self.discrete_sigmas[timesteps - 1].to(timesteps.device), )
def snake_case__ ( self, __a, __a, __a, __a = None, __a = True, ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
_lowerCAmelCase : Dict = timestep * torch.ones(
sample.shape[0], device=sample.device) # torch.repeat_interleave(timestep, sample.shape[0])
_lowerCAmelCase : Dict = (timestep * (len(self.timesteps) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
_lowerCAmelCase : Union[str, Any] = timesteps.to(self.discrete_sigmas.device)
_lowerCAmelCase : Any = self.discrete_sigmas[timesteps].to(sample.device)
_lowerCAmelCase : List[Any] = self.get_adjacent_sigma(__a, __a).to(sample.device)
_lowerCAmelCase : List[str] = torch.zeros_like(__a)
_lowerCAmelCase : Union[str, Any] = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
_lowerCAmelCase : Union[str, Any] = diffusion.flatten()
while len(diffusion.shape) < len(sample.shape):
_lowerCAmelCase : Optional[int] = diffusion.unsqueeze(-1)
_lowerCAmelCase : Dict = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of the SDE
_lowerCAmelCase : Optional[Any] = randn_tensor(
sample.shape, layout=sample.layout, generator=__a, device=sample.device, dtype=sample.dtype)
_lowerCAmelCase : int = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
_lowerCAmelCase : Tuple = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=__a, prev_sample_mean=__a)
def snake_case__ ( self, __a, __a, __a = None, __a = True, ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
        # For small batch sizes, the paper suggests replacing norm(z) with sqrt(d), where d is the dim. of z
# sample noise for correction
_lowerCAmelCase : Union[str, Any] = randn_tensor(sample.shape, layout=sample.layout, generator=__a).to(sample.device)
# compute step size from the model_output, the noise, and the snr
_lowerCAmelCase : Any = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
_lowerCAmelCase : Dict = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
_lowerCAmelCase : Optional[Any] = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
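# this appears to match the annealed Langevin corrector step size, 2 * (snr * ||z|| / ||score||) ** 2, from the score-SDE paper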
_lowerCAmelCase : Dict = step_size * torch.ones(sample.shape[0]).to(sample.device)
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
_lowerCAmelCase : List[Any] = step_size.flatten()
while len(step_size.shape) < len(sample.shape):
_lowerCAmelCase : int = step_size.unsqueeze(-1)
_lowerCAmelCase : List[Any] = sample + step_size * model_output
_lowerCAmelCase : Tuple = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__a)
def snake_case__ ( self, __a, __a, __a, ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = timesteps.to(original_samples.device)
_lowerCAmelCase : Union[str, Any] = self.discrete_sigmas.to(original_samples.device)[timesteps]
_lowerCAmelCase : Any = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(__a) * sigmas[:, None, None, None]
)
_lowerCAmelCase : int = noise + original_samples
return noisy_samples
def __len__( self):
'''simple docstring'''
return self.config.num_train_timesteps
| 300
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_snake_case = {
"configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["RemBertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["RemBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RemBertForCausalLM",
"RemBertForMaskedLM",
"RemBertForMultipleChoice",
"RemBertForQuestionAnswering",
"RemBertForSequenceClassification",
"RemBertForTokenClassification",
"RemBertLayer",
"RemBertModel",
"RemBertPreTrainedModel",
"load_tf_weights_in_rembert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRemBertForCausalLM",
"TFRemBertForMaskedLM",
"TFRemBertForMultipleChoice",
"TFRemBertForQuestionAnswering",
"TFRemBertForSequenceClassification",
"TFRemBertForTokenClassification",
"TFRemBertLayer",
"TFRemBertModel",
"TFRemBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 300
|
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def A ( _lowerCamelCase = 8 ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = ascii_letters + digits + punctuation
return "".join(secrets.choice(_lowerCamelCase ) for _ in range(_lowerCamelCase ) )
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
i -= len(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = i // 3
_lowerCAmelCase : List[Any] = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
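# split the remaining length into rough thirds (letters absorb the remainder), per the breakdown sketched above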
_lowerCAmelCase : str = (
chars_incl
+ random(_lowerCamelCase , quotient + remainder )
+ random(_lowerCamelCase , _lowerCamelCase )
+ random(_lowerCamelCase , _lowerCamelCase )
)
_lowerCAmelCase : str = list(_lowerCamelCase )
shuffle(_lowerCamelCase )
return "".join(_lowerCamelCase )
# random is a generalised function for letters, characters and numbers
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
return "".join(secrets.choice(_lowerCamelCase ) for _ in range(_lowerCamelCase ) )
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
pass # Put your code here...
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
pass # Put your code here...
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
pass # Put your code here...
def A ( _lowerCamelCase , _lowerCamelCase = 8 ):
'''simple docstring'''
if len(_lowerCamelCase ) < min_length:
# Your Password must be at least 8 characters long
return False
_lowerCAmelCase : Tuple = any(char in ascii_uppercase for char in password )
_lowerCAmelCase : Tuple = any(char in ascii_lowercase for char in password )
_lowerCAmelCase : Optional[Any] = any(char in digits for char in password )
_lowerCAmelCase : Tuple = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase,
# numbers, and special characters
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = int(input("Please indicate the max length of your password: " ).strip() )
_lowerCAmelCase : Tuple = input(
"Please indicate the characters that must be in your password: " ).strip()
print("Password generated:" , password_generator(_lowerCamelCase ) )
print(
"Alternative Password generated:" , alternative_password_generator(_lowerCamelCase , _lowerCamelCase ) , )
print("[If you are thinking of using this passsword, You better save it.]" )
if __name__ == "__main__":
main()
| 300
| 1
|
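# molar (universal) gas constant R in J / (mol * K)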
_snake_case = 8.3144598
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if temperature < 0:
raise Exception("Temperature cannot be less than 0 K" )
if molar_mass <= 0:
raise Exception("Molar mass cannot be less than or equal to 0 kg/mol" )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
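# v_rms = sqrt(3 * R * T / M); for N2 (M = 0.028 kg/mol) at 300 K this gives about 517 m/s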
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
_snake_case = 300
    _snake_case = 0.028  # molar mass of N2 in kg/mol; the formula expects SI units
_snake_case = rms_speed_of_molecule(temperature, molar_mass)
print(f'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
| 300
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_snake_case = {
"configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["ConvNextFeatureExtractor"]
_snake_case = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvNextForImageClassification",
"ConvNextModel",
"ConvNextPreTrainedModel",
"ConvNextBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TFConvNextForImageClassification",
"TFConvNextModel",
"TFConvNextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure)
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small")
_lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained("google/mt5-small")
_lowerCAmelCase : str = tokenizer("Hello there", return_tensors="np").input_ids
_lowerCAmelCase : Dict = tokenizer("Hi I am", return_tensors="np").input_ids
_lowerCAmelCase : int = shift_tokens_right(__a, model.config.pad_token_id, model.config.decoder_start_token_id)
_lowerCAmelCase : List[Any] = model(__a, decoder_input_ids=__a).logits
_lowerCAmelCase : Union[str, Any] = optax.softmax_cross_entropy(__a, onehot(__a, logits.shape[-1])).mean()
_lowerCAmelCase : List[str] = -(labels.shape[-1] * loss.item())
_lowerCAmelCase : Union[str, Any] = -84.9_127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1E-4)
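# Note (editorial): the EXPECTED_SCORE check above compares the negative total
# cross-entropy over the target tokens (sequence length times the mean token
# loss) against a reference value, with a 1e-4 tolerance.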
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34 # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8 # unit of c : m * s^-1
def casimir_force(force , area , distance ):
'''simple docstring'''
if (force, area, distance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if force < 0:
raise ValueError("Magnitude of force can not be negative" )
if distance < 0:
raise ValueError("Distance can not be negative" )
if area < 0:
raise ValueError("Area can not be negative" )
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
raise ValueError("One and only one argument must be 0" )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
_snake_case = logging.get_logger(__name__)
@dataclass
class UpperCAmelCase_ ( a):
lowerCamelCase__ = [
'no_inference',
'no_cuda',
'no_tpu',
'no_speed',
'no_memory',
'no_env_print',
'no_multi_process',
]
def __init__( self, **__a):
'''simple docstring'''
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}")
        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
super().__init__(**__a)
lowerCamelCase__ = field(default=a , metadata={'help': 'Trace the models using torchscript'})
lowerCamelCase__ = field(default=a , metadata={'help': 'Print Xla/PyTorch tpu metrics'})
lowerCamelCase__ = field(
default='O1' , metadata={
'help': (
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '
'See details at https://nvidia.github.io/apex/amp.html'
)
} , )
@cached_property
def snake_case__ ( self):
'''simple docstring'''
requires_backends(self, ["torch"])
logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu
@property
def snake_case__ ( self):
'''simple docstring'''
return is_torch_tpu_available() and self.tpu
@property
def snake_case__ ( self):
'''simple docstring'''
requires_backends(self, ["torch"])
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def snake_case__ ( self):
'''simple docstring'''
requires_backends(self, ["torch"])
return self._setup_devices[0]
@property
def snake_case__ ( self):
'''simple docstring'''
requires_backends(self, ["torch"])
return self._setup_devices[1]
@property
def snake_case__ ( self):
'''simple docstring'''
return self.n_gpu > 0
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
_snake_case = True
from torch.cuda.amp import autocast
_snake_case = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase_ :
lowerCamelCase__ = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
lowerCamelCase__ = field(
default=a , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'Whether to log verbose messages or not.'} , )
lowerCamelCase__ = field(
default=2.0 , metadata={'help': 'Maximum temperature for gumbel softmax.'})
lowerCamelCase__ = field(
default=0.5 , metadata={'help': 'Minimum temperature for gumbel softmax.'})
lowerCamelCase__ = field(
default=0.9_9_9_9_9_5 , metadata={'help': 'Decay of gumbel temperature during training.'})
def configure_logger(model_args , training_args ):
    '''simple docstring'''
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank ):
        logging_level = logging.INFO
    logger.setLevel(logging_level )
@dataclass
class UpperCAmelCase_ :
lowerCamelCase__ = field(
default=a , metadata={'help': 'The name of the dataset to use (via the datasets library).'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
lowerCamelCase__ = field(
default='train' , metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} , )
lowerCamelCase__ = field(
default='validation' , metadata={
'help': (
'The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''
)
} , )
lowerCamelCase__ = field(
default='file' , metadata={'help': 'Column in the dataset that contains speech file path. Defaults to \'file\''} , )
lowerCamelCase__ = field(
default=a , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'})
lowerCamelCase__ = field(
default=1 , metadata={
'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
} , )
lowerCamelCase__ = field(
default=a , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
lowerCamelCase__ = field(
default=2_0.0 , metadata={'help': 'Filter audio files that are longer than `max_duration_in_seconds` seconds'})
@dataclass
class UpperCAmelCase_ :
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = "longest"
lowerCamelCase__ = None
lowerCamelCase__ = None
def __call__( self, __a):
'''simple docstring'''
_lowerCAmelCase : Any = self.feature_extractor.pad(
__a, max_length=self.max_length, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", )
_lowerCAmelCase : Tuple = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
_lowerCAmelCase : Optional[Any] = batch["input_values"].shape[0]
# make sure that no loss is computed on padded inputs
if batch["attention_mask"] is not None:
# compute real output lengths according to convolution formula
_lowerCAmelCase : List[str] = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
torch.long)
_lowerCAmelCase : Dict = torch.zeros(
(batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device)
# these two operations makes sure that all values
# before the output lengths indices are attended to
_lowerCAmelCase : List[str] = 1
_lowerCAmelCase : Union[str, Any] = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
# sample randomly masked indices
_lowerCAmelCase : Optional[Any] = _compute_mask_indices(
(batch_size, mask_indices_seq_length), self.model.config.mask_time_prob, self.model.config.mask_time_length, attention_mask=__a, min_masks=2, )
return batch
class UpperCAmelCase_ ( a):
    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        '''simple docstring'''
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay
    def training_step(self, model, inputs):
        '''simple docstring'''
        model.train()
        inputs = self._prepare_inputs(inputs)
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)
        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()
        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))
        return loss.detach()
def main():
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args , training_args )
    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]" , cache_dir=model_args.cache_dir , )
        datasets["train"] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]" , cache_dir=model_args.cache_dir , )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split="validation" , cache_dir=model_args.cache_dir , )
        datasets["train"] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=f"{data_args.train_split_name}" , cache_dir=model_args.cache_dir , )
    # only normalized-inputs-training is supported
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=True )
    def prepare_dataset(batch ):
        # check that all files have the correct sampling rate
        batch["speech"] , _ = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
        return batch
    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets["train"].column_names )
    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data : len(data["speech"] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
    def normalize(batch ):
        return feature_extractor(batch["speech"] , sampling_rate=feature_extractor.sampling_rate )
    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets["train"].column_names , )
    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = WavaVecaConfig.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'" )
    model = WavaVecaForPreTraining(config )
    data_collator = DataCollatorForWavaVecaPretraining(model=model , feature_extractor=feature_extractor )
    trainer = WavaVecaPreTrainer(
        model=model , data_collator=data_collator , args=training_args , train_dataset=vectorized_datasets["train"] , eval_dataset=vectorized_datasets["validation"] , tokenizer=feature_extractor , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
    trainer.train()
if __name__ == "__main__":
main()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {
"configuration_clipseg": [
"CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPSegConfig",
"CLIPSegTextConfig",
"CLIPSegVisionConfig",
],
"processing_clipseg": ["CLIPSegProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPSegModel",
"CLIPSegPreTrainedModel",
"CLIPSegTextModel",
"CLIPSegVisionModel",
"CLIPSegForImageSegmentation",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def A ( _lowerCamelCase = "laptop" ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = F"https://www.amazon.in/laptop/s?k={product}"
_lowerCAmelCase : Dict = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
"Accept-Language": "en-US, en;q=0.5",
}
_lowerCAmelCase : Optional[int] = BeautifulSoup(requests.get(_lowerCamelCase , headers=_lowerCamelCase ).text )
# Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
columns=[
"Product Title",
"Product Link",
"Current Price of the product",
"Product Rating",
"MRP of the product",
"Discount",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span" , attrs={"class": "a-offscreen"} ).text
            try:
                product_rating = item.find("span" , attrs={"class": "a-icon-alt"} ).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹" ).replace("," , "" ) )
                            - float(product_price.strip("₹" ).replace("," , "" ) )
                        )
                        / float(product_mrp.strip("₹" ).replace("," , "" ) )
                    )
                    * 100 )
            except ValueError:
                discount = float("nan" )
except AttributeError:
pass
_lowerCAmelCase : Any = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
_lowerCAmelCase : List[str] = " "
_lowerCAmelCase : Tuple = " "
data_frame.index += 1
return data_frame
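# Note (editorial): the discount column computed above is the percentage saving
# (MRP - price) / MRP * 100; rows with a missing MRP end up as NaN.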
if __name__ == "__main__":
_snake_case = "headphones"
get_amazon_product_data(product).to_csv(f'''Amazon Product Data for {product}.csv''')
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
_snake_case = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"), os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"), )
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = "src/transformers"
shutil.rmtree(self.transformer_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        '''simple docstring'''
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
def snake_case__ ( self):
'''simple docstring'''
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)
def snake_case__ ( self):
'''simple docstring'''
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead", "BertLMPredictionHead", REFERENCE_CODE + "\n", )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead", "BertLMPredictionHead", __a, )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel", "TestModelLMPredictionHead", re.sub("Bert", "TestModel", __a), )
# Copy consistency with a really long name
_lowerCAmelCase : Dict = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}", f"{long_class_name}LMPredictionHead", re.sub("Bert", __a, __a), )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel", "TestModelLMPredictionHead", __a, overwrite_result=re.sub("Bert", "TestModel", __a), )
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = check_copies.LOCALIZED_READMES["README_zh-hans.md"]
_lowerCAmelCase : str = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"
" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"
" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"
" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"
" Luong, Quoc V. Le, Christopher D. Manning."
)
_lowerCAmelCase : List[str] = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
_lowerCAmelCase : List[str] = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"
" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"
" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"
" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"
" Christopher D. Manning 发布。\n"
)
_lowerCAmelCase , _lowerCAmelCase : Dict = check_copies.convert_to_localized_md(
__a, __a, localized_readme["format_model_list"])
self.assertFalse(__a)
self.assertEqual(__a, __a)
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = check_copies.convert_to_localized_md(
__a, __a, localized_readme["format_model_list"])
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(__a)
_lowerCAmelCase : Union[str, Any] = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."
)
_lowerCAmelCase : List[Any] = (
"1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"
" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
_lowerCAmelCase : Optional[int] = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = check_copies.convert_to_localized_md(
__a, __a, localized_readme["format_model_list"])
# Check if the model link is synchronized.
self.assertEqual(__a, __a)
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model , ckpt_dir , model_name ):
    '''simple docstring'''
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )
    if not os.path.isdir(ckpt_dir ):
        os.makedirs(ckpt_dir )
    state_dict = model.state_dict()
    def to_tf_var_name(name ):
        for patt, repl in iter(var_map ):
            name = name.replace(patt , repl )
        return f"bert/{name}"
    def create_tf_var(tensor , name , session ):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype )
        tf_var = tf.get_variable(dtype=tf_dtype , shape=tensor.shape , name=name , initializer=tf.zeros_initializer() )
        session.run(tf.variables_initializer([tf_var] ) )
        session.run(tf_var )
        return tf_var
    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name )
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose ):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor , name=tf_name , session=session )
            tf.keras.backend.set_value(tf_var , torch_tensor )
            tf_weight = session.run(tf_var )
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight , torch_tensor )}" )
        saver = tf.train.Saver(tf.trainable_variables() )
        saver.save(session , os.path.join(ckpt_dir , model_name.replace("-" , "_" ) + ".ckpt" ) )
def main(raw_args=None ):
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name" , type=str , required=True , help="model name e.g. bert-base-uncased" )
    parser.add_argument(
        "--cache_dir" , type=str , default=None , required=False , help="Directory containing pytorch model" )
    parser.add_argument("--pytorch_model_path" , type=str , required=True , help="/path/to/<pytorch-model-name>.bin" )
    parser.add_argument("--tf_cache_dir" , type=str , required=True , help="Directory in which to save tensorflow model" )
    args = parser.parse_args(raw_args )
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
    convert_pytorch_checkpoint_to_tf(model=model , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
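# Example invocation (editorial; the script filename and paths are illustrative):
#   python convert_bert_pytorch_checkpoint_to_original_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin \
#       --tf_cache_dir ./tf_checkpoint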
if __name__ == "__main__":
main()
def hamming(n_element ):
    '''simple docstring'''
    n_element = int(n_element )
    if n_element < 1:
        my_error = ValueError("n_element should be a positive number" )
        raise my_error
    hamming_list = [1]
    i , j , k = (0, 0, 0)
    index = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
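# Illustrative expected output (editorial): hamming(10) returns the first ten
# 5-smooth numbers, [1, 2, 3, 4, 5, 6, 8, 9, 10, 12].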
if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
print("-----------------------------------------------------")
print(f'''The list with nth numbers is: {hamming_numbers}''')
print("-----------------------------------------------------")
class UpperCAmelCase_ :
def __init__( self):
'''simple docstring'''
_lowerCAmelCase : Dict = 0
_lowerCAmelCase : Optional[int] = 0
_lowerCAmelCase : Tuple = {}
def snake_case__ ( self, __a):
'''simple docstring'''
if vertex not in self.adjacency:
_lowerCAmelCase : List[Any] = {}
self.num_vertices += 1
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
self.add_vertex(__a)
self.add_vertex(__a)
if head == tail:
return
_lowerCAmelCase : Dict = weight
_lowerCAmelCase : Dict = weight
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.get_edges()
for edge in edges:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Dict = edge
edges.remove((tail, head, weight))
for i in range(len(__a)):
_lowerCAmelCase : Optional[int] = list(edges[i])
edges.sort(key=lambda __a: e[2])
for i in range(len(__a) - 1):
if edges[i][2] >= edges[i + 1][2]:
_lowerCAmelCase : Tuple = edges[i][2] + 1
for edge in edges:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Dict = edge
_lowerCAmelCase : Union[str, Any] = weight
_lowerCAmelCase : Optional[int] = weight
def __str__( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = ""
for tail in self.adjacency:
for head in self.adjacency[tail]:
_lowerCAmelCase : List[Any] = self.adjacency[head][tail]
string += f"{head} -> {tail} == {weight}\n"
return string.rstrip("\n")
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]))
return output
def snake_case__ ( self):
'''simple docstring'''
return self.adjacency.keys()
@staticmethod
def snake_case__ ( __a=None, __a=None):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = Graph()
if vertices is None:
_lowerCAmelCase : Any = []
if edges is None:
_lowerCAmelCase : Any = []
for vertex in vertices:
g.add_vertex(__a)
for edge in edges:
g.add_edge(*__a)
return g
class UpperCAmelCase_ :
def __init__( self):
'''simple docstring'''
_lowerCAmelCase : Dict = {}
_lowerCAmelCase : List[Any] = {}
def __len__( self):
'''simple docstring'''
return len(self.parent)
def snake_case__ ( self, __a):
'''simple docstring'''
if item in self.parent:
return self.find(__a)
_lowerCAmelCase : Optional[int] = item
_lowerCAmelCase : Any = 0
return item
def snake_case__ ( self, __a):
'''simple docstring'''
if item not in self.parent:
return self.make_set(__a)
if item != self.parent[item]:
_lowerCAmelCase : Any = self.find(self.parent[item])
return self.parent[item]
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.find(__a)
_lowerCAmelCase : List[str] = self.find(__a)
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
_lowerCAmelCase : Any = roota
return roota
if self.rank[roota] < self.rank[roota]:
_lowerCAmelCase : List[Any] = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
_lowerCAmelCase : int = roota
return roota
return None
@staticmethod
def snake_case__ ( __a):
'''simple docstring'''
_lowerCAmelCase : Tuple = graph.num_vertices
_lowerCAmelCase : Optional[int] = Graph.UnionFind()
_lowerCAmelCase : str = []
while num_components > 1:
_lowerCAmelCase : List[str] = {}
for vertex in graph.get_vertices():
_lowerCAmelCase : Optional[Any] = -1
_lowerCAmelCase : Union[str, Any] = graph.get_edges()
for edge in edges:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : str = edge
edges.remove((tail, head, weight))
for edge in edges:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = edge
_lowerCAmelCase : Dict = union_find.find(__a)
_lowerCAmelCase : Optional[Any] = union_find.find(__a)
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_lowerCAmelCase : Union[str, Any] = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_lowerCAmelCase : Tuple = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = cheap_edge[vertex]
if union_find.find(__a) != union_find.find(__a):
union_find.union(__a, __a)
mst_edges.append(cheap_edge[vertex])
_lowerCAmelCase : Any = num_components - 1
_lowerCAmelCase : List[str] = Graph.build(edges=__a)
return mst
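# Illustrative use (editorial; assumes the mangled names above correspond to the
# original `Graph` class with a `boruvka_mst` static method):
#   g = Graph.build(vertices=[1, 2, 3], edges=[(1, 2, 1), (2, 3, 2), (1, 3, 3)])
#   mst = Graph.boruvka_mst(g)  # keeps the two cheapest edges, 1-2 and 2-3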
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path ):
    '''simple docstring'''
    sd = torch.load(checkpoint_path , map_location="cpu" )
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path , map_location="cpu" )["model"]
    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key )
    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key )
    keys = list(sd.keys() )
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj." , ".q_proj." )
            k_name = key.replace(".qkv_proj." , ".k_proj." )
            v_name = key.replace(".qkv_proj." , ".v_proj." )
            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` keeps the QKV weight separated in K,V,Q order despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k , v , q = torch.split(value , depth // 3 , dim=0 )
            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]
    return sd
@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path , pytorch_dump_folder_path , config=None ):
    '''simple docstring'''
    state_dict = load_checkpoint(checkpoint_path )
    if config is not None:
        config = OPTConfig.from_pretrained(config )
    else:
        config = OPTConfig()
    model = OPTModel(config ).half().eval()
    model.load_state_dict(state_dict )
    # Check results
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
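# Example invocation (editorial; the script filename and paths are illustrative):
#   python convert_opt_original_pytorch_checkpoint_to_pytorch.py \
#       --fairseq_path ./opt-125m/restored.pt \
#       --pytorch_dump_folder_path ./opt-125m-hf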
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fairseq_path",
type=str,
help=(
"path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
" https://huggingface.co/models?other=opt_metasq"
),
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
_snake_case = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
_snake_case = "Input must be a string of 8 numbers plus letter"
_snake_case = "TRWAGMYFPDXBNJZSQVHLCKE"
def A ( _lowerCamelCase ):
'''simple docstring'''
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = F"Expected string as input, found {type(_lowerCamelCase ).__name__}"
raise TypeError(_lowerCamelCase )
_lowerCAmelCase : Dict = spanish_id.replace("-" , "" ).upper()
if len(_lowerCamelCase ) != 9:
raise ValueError(_lowerCamelCase )
try:
_lowerCAmelCase : Union[str, Any] = int(spanish_id_clean[0:8] )
_lowerCAmelCase : int = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(_lowerCamelCase ) from ex
if letter.isdigit():
raise ValueError(_lowerCamelCase )
return letter == LOOKUP_LETTERS[number % 23]
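# Illustrative checks (editorial): "12345678Z" validates because
# 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == "Z"; "12345678A" fails the
# checksum and returns False.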
if __name__ == "__main__":
import doctest
doctest.testmod()
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'wav2vec2'
def __init__( self, __a=32, __a=768, __a=12, __a=12, __a=3072, __a="gelu", __a=0.1, __a=0.1, __a=0.1, __a=0.0, __a=0.0, __a=0.1, __a=0.1, __a=0.02, __a=1E-5, __a="group", __a="gelu", __a=(512, 512, 512, 512, 512, 512, 512), __a=(5, 2, 2, 2, 2, 2, 2), __a=(10, 3, 3, 3, 3, 2, 2), __a=False, __a=128, __a=16, __a=False, __a=True, __a=0.05, __a=10, __a=2, __a=0.0, __a=10, __a=0, __a=320, __a=2, __a=0.1, __a=100, __a=256, __a=256, __a=0.1, __a="sum", __a=False, __a=False, __a=256, __a=(512, 512, 512, 512, 1500), __a=(5, 3, 3, 1, 1), __a=(1, 2, 3, 1, 1), __a=512, __a=0, __a=1, __a=2, __a=False, __a=3, __a=2, __a=3, __a=None, __a=None, **__a, ):
'''simple docstring'''
super().__init__(**__a, pad_token_id=__a, bos_token_id=__a, eos_token_id=__a)
_lowerCAmelCase : str = hidden_size
_lowerCAmelCase : Optional[int] = feat_extract_norm
_lowerCAmelCase : Dict = feat_extract_activation
_lowerCAmelCase : Any = list(__a)
_lowerCAmelCase : List[str] = list(__a)
_lowerCAmelCase : List[Any] = list(__a)
_lowerCAmelCase : List[str] = conv_bias
_lowerCAmelCase : Optional[Any] = num_conv_pos_embeddings
_lowerCAmelCase : Dict = num_conv_pos_embedding_groups
_lowerCAmelCase : Any = len(self.conv_dim)
_lowerCAmelCase : Union[str, Any] = num_hidden_layers
_lowerCAmelCase : int = intermediate_size
_lowerCAmelCase : List[Any] = hidden_act
_lowerCAmelCase : Any = num_attention_heads
_lowerCAmelCase : List[str] = hidden_dropout
_lowerCAmelCase : Tuple = attention_dropout
_lowerCAmelCase : List[Any] = activation_dropout
_lowerCAmelCase : Dict = feat_proj_dropout
_lowerCAmelCase : Optional[int] = final_dropout
_lowerCAmelCase : Dict = layerdrop
_lowerCAmelCase : Tuple = layer_norm_eps
_lowerCAmelCase : Tuple = initializer_range
_lowerCAmelCase : int = vocab_size
_lowerCAmelCase : Tuple = do_stable_layer_norm
_lowerCAmelCase : Any = use_weighted_layer_sum
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
f" `len(config.conv_kernel) = {len(self.conv_kernel)}`.")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowerCAmelCase : Optional[int] = apply_spec_augment
_lowerCAmelCase : int = mask_time_prob
_lowerCAmelCase : str = mask_time_length
_lowerCAmelCase : int = mask_time_min_masks
_lowerCAmelCase : List[Any] = mask_feature_prob
_lowerCAmelCase : List[Any] = mask_feature_length
_lowerCAmelCase : List[Any] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_lowerCAmelCase : int = num_codevectors_per_group
_lowerCAmelCase : List[str] = num_codevector_groups
_lowerCAmelCase : List[Any] = contrastive_logits_temperature
_lowerCAmelCase : int = feat_quantizer_dropout
_lowerCAmelCase : Any = num_negatives
_lowerCAmelCase : Dict = codevector_dim
_lowerCAmelCase : Any = proj_codevector_dim
_lowerCAmelCase : Optional[int] = diversity_loss_weight
# ctc loss
_lowerCAmelCase : Optional[Any] = ctc_loss_reduction
_lowerCAmelCase : str = ctc_zero_infinity
# adapter
_lowerCAmelCase : Optional[Any] = add_adapter
_lowerCAmelCase : Tuple = adapter_kernel_size
_lowerCAmelCase : str = adapter_stride
_lowerCAmelCase : List[Any] = num_adapter_layers
_lowerCAmelCase : str = output_hidden_size or hidden_size
_lowerCAmelCase : List[str] = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_lowerCAmelCase : List[str] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_lowerCAmelCase : int = list(__a)
_lowerCAmelCase : Dict = list(__a)
_lowerCAmelCase : Dict = list(__a)
_lowerCAmelCase : Tuple = xvector_output_dim
@property
def snake_case__ ( self):
'''simple docstring'''
return functools.reduce(operator.mul, self.conv_stride, 1)
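# Note (editorial): with the default conv_stride of (5, 2, 2, 2, 2, 2, 2) this
# property evaluates to 5 * 2**6 = 320, i.e. the feature encoder emits one
# frame per 320 input samples (20 ms of audio at 16 kHz).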
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
_snake_case = False
try:
_snake_case = _is_package_available("google.colab")
except ModuleNotFoundError:
pass
@input.register
class UpperCAmelCase_ :
def __init__( self, __a = None, __a = []):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = 0
_lowerCAmelCase : Optional[int] = choices
_lowerCAmelCase : Tuple = prompt
if sys.platform == "win32":
_lowerCAmelCase : Optional[Any] = "*"
else:
_lowerCAmelCase : Dict = "➔ "
def snake_case__ ( self, __a, __a = ""):
'''simple docstring'''
if sys.platform != "win32":
writeColor(self.choices[index], 32, __a)
else:
forceWrite(self.choices[index], __a)
def snake_case__ ( self, __a):
'''simple docstring'''
if index == self.position:
forceWrite(f" {self.arrow_char} ")
self.write_choice(__a)
else:
forceWrite(f" {self.choices[index]}")
reset_cursor()
def snake_case__ ( self, __a, __a = 1):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(__a)
move_cursor(__a, direction.name)
self.print_choice(self.position)
@input.mark(KEYMAP["up"])
def snake_case__ ( self):
'''simple docstring'''
self.move_direction(Direction.UP)
@input.mark(KEYMAP["down"])
def snake_case__ ( self):
'''simple docstring'''
self.move_direction(Direction.DOWN)
@input.mark(KEYMAP["newline"])
def snake_case__ ( self):
'''simple docstring'''
move_cursor(len(self.choices) - self.position, "DOWN")
return self.position
@input.mark(KEYMAP["interrupt"])
def snake_case__ ( self):
'''simple docstring'''
move_cursor(len(self.choices) - self.position, "DOWN")
raise KeyboardInterrupt
    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
def snake_case__ ( self):
'''simple docstring'''
        index = int(chr(self.current_selection))
        movement = index - self.position
if index == self.position:
return
if index < len(self.choices):
if self.position > index:
self.move_direction(Direction.UP, -movement)
elif self.position < index:
self.move_direction(Direction.DOWN, __a)
else:
return
else:
return
def snake_case__ ( self, __a = 0):
'''simple docstring'''
if self.prompt:
linebreak()
forceWrite(self.prompt, "\n")
if in_colab:
forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
else:
forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
_lowerCAmelCase : List[Any] = default_choice
for i in range(len(self.choices)):
self.print_choice(__a)
forceWrite("\n")
move_cursor(len(self.choices) - self.position, "UP")
with cursor.hide():
while True:
if in_colab:
try:
_lowerCAmelCase : str = int(builtins.input())
except ValueError:
_lowerCAmelCase : List[Any] = default_choice
else:
_lowerCAmelCase : List[str] = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices) + 1):
move_cursor(1, "UP")
clear_line()
self.write_choice(__a, "\n")
return choice
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_snake_case = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class UpperCAmelCase_ ( unittest.TestCase):
lowerCamelCase__ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
lowerCamelCase__ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
lowerCamelCase__ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
lowerCamelCase__ = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = pipeline(
task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt")
_lowerCAmelCase : List[Any] = text_classifier("This is great !")
self.assertEqual(nested_simplify(__a), [{"label": "LABEL_0", "score": 0.504}])
_lowerCAmelCase : Union[str, Any] = text_classifier("This is great !", top_k=2)
self.assertEqual(
nested_simplify(__a), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}])
_lowerCAmelCase : str = text_classifier(["This is great !", "This is bad"], top_k=2)
self.assertEqual(
nested_simplify(__a), [
[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
], )
_lowerCAmelCase : List[str] = text_classifier("This is great !", top_k=1)
self.assertEqual(nested_simplify(__a), [{"label": "LABEL_0", "score": 0.504}])
# Legacy behavior
_lowerCAmelCase : Dict = text_classifier("This is great !", return_all_scores=__a)
self.assertEqual(nested_simplify(__a), [{"label": "LABEL_0", "score": 0.504}])
_lowerCAmelCase : Dict = text_classifier("This is great !", return_all_scores=__a)
self.assertEqual(
nested_simplify(__a), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]])
_lowerCAmelCase : Any = text_classifier(["This is great !", "Something else"], return_all_scores=__a)
self.assertEqual(
nested_simplify(__a), [
[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
], )
_lowerCAmelCase : Union[str, Any] = text_classifier(["This is great !", "Something else"], return_all_scores=__a)
self.assertEqual(
nested_simplify(__a), [
{"label": "LABEL_0", "score": 0.504},
{"label": "LABEL_0", "score": 0.504},
], )
@require_torch
def snake_case__ ( self):
'''simple docstring'''
import torch
_lowerCAmelCase : Tuple = pipeline(
task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt", device=torch.device("cpu"), )
_lowerCAmelCase : Tuple = text_classifier("This is great !")
self.assertEqual(nested_simplify(__a), [{"label": "LABEL_0", "score": 0.504}])
@require_tf
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = pipeline(
task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf")
_lowerCAmelCase : List[str] = text_classifier("This is great !")
self.assertEqual(nested_simplify(__a), [{"label": "LABEL_0", "score": 0.504}])
@slow
@require_torch
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = pipeline("text-classification")
_lowerCAmelCase : int = text_classifier("This is great !")
self.assertEqual(nested_simplify(__a), [{"label": "POSITIVE", "score": 1.0}])
_lowerCAmelCase : Dict = text_classifier("This is bad !")
self.assertEqual(nested_simplify(__a), [{"label": "NEGATIVE", "score": 1.0}])
_lowerCAmelCase : int = text_classifier("Birds are a type of animal")
self.assertEqual(nested_simplify(__a), [{"label": "POSITIVE", "score": 0.988}])
@slow
@require_tf
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = pipeline("text-classification", framework="tf")
_lowerCAmelCase : Optional[Any] = text_classifier("This is great !")
self.assertEqual(nested_simplify(__a), [{"label": "POSITIVE", "score": 1.0}])
_lowerCAmelCase : str = text_classifier("This is bad !")
self.assertEqual(nested_simplify(__a), [{"label": "NEGATIVE", "score": 1.0}])
_lowerCAmelCase : Any = text_classifier("Birds are a type of animal")
self.assertEqual(nested_simplify(__a), [{"label": "POSITIVE", "score": 0.988}])
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = TextClassificationPipeline(model=__a, tokenizer=__a)
return text_classifier, ["HuggingFace is in", "This is another test"]
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Dict = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
_lowerCAmelCase : Dict = "HuggingFace is in"
_lowerCAmelCase : Any = text_classifier(__a)
self.assertEqual(nested_simplify(__a), [{"label": ANY(__a), "score": ANY(__a)}])
self.assertTrue(outputs[0]["label"] in model.config.idalabel.values())
_lowerCAmelCase : Optional[int] = ["HuggingFace is in ", "Paris is in France"]
_lowerCAmelCase : Tuple = text_classifier(__a)
self.assertEqual(
nested_simplify(__a), [{"label": ANY(__a), "score": ANY(__a)}, {"label": ANY(__a), "score": ANY(__a)}], )
self.assertTrue(outputs[0]["label"] in model.config.idalabel.values())
self.assertTrue(outputs[1]["label"] in model.config.idalabel.values())
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
_lowerCAmelCase : Optional[Any] = text_classifier(__a, top_k=__a)
_lowerCAmelCase : Tuple = len(model.config.idalabel.values())
self.assertEqual(
nested_simplify(__a), [[{"label": ANY(__a), "score": ANY(__a)}] * N, [{"label": ANY(__a), "score": ANY(__a)}] * N], )
_lowerCAmelCase : List[str] = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
_lowerCAmelCase : int = text_classifier(__a)
self.assertEqual(
nested_simplify(__a), {"label": ANY(__a), "score": ANY(__a)}, )
self.assertTrue(outputs["label"] in model.config.idalabel.values())
        # This might be used as a text pair, but the tokenizer + pipeline interaction
        # makes it hard to see that the pair is not being used properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead, as it was producing wrong outputs.
_lowerCAmelCase : Tuple = [["HuggingFace is in ", "Paris is in France"]]
with self.assertRaises(__a):
text_classifier(__a)
# This used to be valid for doing text pairs
        # We keep it working for backward compatibility
_lowerCAmelCase : Dict = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
self.assertEqual(
nested_simplify(__a), [{"label": ANY(__a), "score": ANY(__a)}], )
self.assertTrue(outputs[0]["label"] in model.config.idalabel.values())
| 300
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_snake_case = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["BeitFeatureExtractor"]
_snake_case = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BeitForImageClassification",
"BeitForMaskedImageModeling",
"BeitForSemanticSegmentation",
"BeitModel",
"BeitPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"FlaxBeitForImageClassification",
"FlaxBeitForMaskedImageModeling",
"FlaxBeitModel",
"FlaxBeitPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 300
| 1
|
import functools
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = len(_lowerCamelCase )
_lowerCAmelCase : Dict = len(_lowerCamelCase )
@functools.cache
def min_distance(_lowerCamelCase , _lowerCamelCase ) -> int:
        # if the first word's index overflows, delete the remaining characters of the second word
if indexa >= len_worda:
return len_worda - indexa
        # if the second word's index overflows, delete the remaining characters of the first word
if indexa >= len_worda:
return len_worda - indexa
_lowerCAmelCase : Dict = int(worda[indexa] != worda[indexa] ) # current letters not identical
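        # take the cheapest of deleting a letter, inserting a letter, or substituting (free when the letters already match)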
return min(
1 + min_distance(indexa + 1 , _lowerCamelCase ) , 1 + min_distance(_lowerCamelCase , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , )
return min_distance(0 , 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 300
|
from __future__ import annotations
from typing import Any
class UpperCAmelCase_ :
def __init__( self, __a, __a, __a = 0):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : int = row, column
_lowerCAmelCase : str = [[default_value for c in range(__a)] for r in range(__a)]
def __str__( self):
'''simple docstring'''
        _lowerCAmelCase : Tuple = f"Matrix consists of {self.row} rows and {self.column} columns\n"
# Make string identifier
_lowerCAmelCase : str = 0
for row_vector in self.array:
for obj in row_vector:
_lowerCAmelCase : List[str] = max(__a, len(str(__a)))
_lowerCAmelCase : Union[str, Any] = f"%{max_element_length}s"
# Make string and return
def single_line(__a) -> str:
nonlocal string_format_identifier
_lowerCAmelCase : Dict = "["
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
line += "]"
return line
s += "\n".join(single_line(__a) for row_vector in self.array)
return s
def __repr__( self):
'''simple docstring'''
return str(self)
def snake_case__ ( self, __a):
'''simple docstring'''
if not (isinstance(__a, (list, tuple)) and len(__a) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self, __a):
'''simple docstring'''
assert self.validate_indicies(__a)
return self.array[loc[0]][loc[1]]
def __setitem__( self, __a, __a):
'''simple docstring'''
assert self.validate_indicies(__a)
_lowerCAmelCase : Union[str, Any] = value
def __add__( self, __a):
'''simple docstring'''
assert isinstance(__a, __a)
assert self.row == another.row and self.column == another.column
# Add
_lowerCAmelCase : Any = Matrix(self.row, self.column)
for r in range(self.row):
for c in range(self.column):
_lowerCAmelCase : Any = self[r, c] + another[r, c]
return result
def __neg__( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = Matrix(self.row, self.column)
for r in range(self.row):
for c in range(self.column):
_lowerCAmelCase : str = -self[r, c]
return result
def __sub__( self, __a):
'''simple docstring'''
return self + (-another)
def __mul__( self, __a):
'''simple docstring'''
if isinstance(__a, (int, float)): # Scalar multiplication
_lowerCAmelCase : Dict = Matrix(self.row, self.column)
for r in range(self.row):
for c in range(self.column):
_lowerCAmelCase : Optional[Any] = self[r, c] * another
return result
elif isinstance(__a, __a): # Matrix multiplication
assert self.column == another.row
_lowerCAmelCase : List[str] = Matrix(self.row, another.column)
for r in range(self.row):
for c in range(another.column):
for i in range(self.column):
result[r, c] += self[r, i] * another[i, c]
return result
else:
_lowerCAmelCase : Optional[Any] = f"Unsupported type given for another ({type(__a)})"
raise TypeError(__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = Matrix(self.column, self.row)
for r in range(self.row):
for c in range(self.column):
_lowerCAmelCase : Any = self[r, c]
return result
def snake_case__ ( self, __a, __a):
'''simple docstring'''
assert isinstance(__a, __a) and isinstance(__a, __a)
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
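        # Sherman-Morrison: (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u), with self playing the role of A^(-1)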
# Calculate
_lowerCAmelCase : int = v.transpose()
_lowerCAmelCase : str = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
            return None # the update is not invertible
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
def A ( ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = Matrix(3 , 3 , 0 )
for i in range(3 ):
_lowerCAmelCase : Union[str, Any] = 1
print(F"a^(-1) is {ainv}" )
# u, v
_lowerCAmelCase : Any = Matrix(3 , 1 , 0 )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = 1, 2, -3
_lowerCAmelCase : List[Any] = Matrix(3 , 1 , 0 )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : str = 4, -2, 5
print(F"u is {u}" )
print(F"v is {v}" )
print(F"uv^T is {u * v.transpose()}" )
# Sherman Morrison
print(F"(a + uv^T)^(-1) is {ainv.sherman_morrison(_lowerCamelCase , _lowerCamelCase )}" )
def A ( ):
'''simple docstring'''
import doctest
doctest.testmod()
testa()
| 300
| 1
|
import sys
import turtle
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
return (pa[0] + pa[0]) / 2, (pa[1] + pa[1]) / 2
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
'''simple docstring'''
my_pen.up()
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.down()
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.goto(vertexa[0] , vertexa[1] )
if depth == 0:
return
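    # recurse into the three corner sub-triangles defined by the edge midpoints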
triangle(_lowerCamelCase , get_mid(_lowerCamelCase , _lowerCamelCase ) , get_mid(_lowerCamelCase , _lowerCamelCase ) , depth - 1 )
triangle(_lowerCamelCase , get_mid(_lowerCamelCase , _lowerCamelCase ) , get_mid(_lowerCamelCase , _lowerCamelCase ) , depth - 1 )
triangle(_lowerCamelCase , get_mid(_lowerCamelCase , _lowerCamelCase ) , get_mid(_lowerCamelCase , _lowerCamelCase ) , depth - 1 )
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
"Correct format for using this script: "
"python fractals.py <int:depth_for_fractal>"
)
_snake_case = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor("red")
_snake_case = [(-175, -125), (0, 175), (175, -125)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 300
|
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class UpperCAmelCase_ ( datasets.BuilderConfig):
lowerCamelCase__ = None
class UpperCAmelCase_ ( datasets.ArrowBasedBuilder):
lowerCamelCase__ = PandasConfig
def snake_case__ ( self):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features)
def snake_case__ ( self, __a):
'''simple docstring'''
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
_lowerCAmelCase : str = dl_manager.download_and_extract(self.config.data_files)
if isinstance(__a, (str, list, tuple)):
_lowerCAmelCase : str = data_files
if isinstance(__a, __a):
_lowerCAmelCase : int = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase : Union[str, Any] = [dl_manager.iter_files(__a) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
_lowerCAmelCase : str = []
for split_name, files in data_files.items():
if isinstance(__a, __a):
_lowerCAmelCase : Optional[Any] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase : str = [dl_manager.iter_files(__a) for file in files]
splits.append(datasets.SplitGenerator(name=__a, gen_kwargs={"files": files}))
return splits
def snake_case__ ( self, __a):
'''simple docstring'''
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
_lowerCAmelCase : str = table_cast(__a, self.config.features.arrow_schema)
return pa_table
def snake_case__ ( self, __a):
'''simple docstring'''
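        # each data file is expected to hold a pickled pandas DataFrame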
for i, file in enumerate(itertools.chain.from_iterable(__a)):
with open(__a, "rb") as f:
_lowerCAmelCase : Optional[Any] = pa.Table.from_pandas(pd.read_pickle(__a))
yield i, self._cast_table(__a)
| 300
| 1
|
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( enum.Enum):
lowerCamelCase__ = 0
lowerCamelCase__ = 1
@add_end_docstrings(a)
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'generated'
def __init__( self, *__a, **__a):
'''simple docstring'''
super().__init__(*__a, **__a)
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == "tf"
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING)
def snake_case__ ( self, __a=None, __a=None, __a=None, __a=None, __a=None, __a=None, **__a, ):
'''simple docstring'''
_lowerCAmelCase : int = {}
if truncation is not None:
_lowerCAmelCase : Optional[int] = truncation
_lowerCAmelCase : Tuple = generate_kwargs
_lowerCAmelCase : Optional[int] = {}
if return_tensors is not None and return_type is None:
_lowerCAmelCase : Union[str, Any] = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
_lowerCAmelCase : int = return_type
if clean_up_tokenization_spaces is not None:
_lowerCAmelCase : Optional[Any] = clean_up_tokenization_spaces
if stop_sequence is not None:
_lowerCAmelCase : Optional[int] = self.tokenizer.encode(__a, add_special_tokens=__a)
if len(__a) > 1:
            warnings.warn(
                "Stopping on a multi-token sequence is not yet supported in transformers. The first token of"
                " the stop sequence will be used as the stop sequence string in the interim.")
_lowerCAmelCase : Tuple = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
return True
def snake_case__ ( self, *__a, __a):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.model.config.prefix if self.model.config.prefix is not None else ""
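        # some encoder-decoder models (e.g. T5) define a task prefix that is prepended to every input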
if isinstance(args[0], __a):
if self.tokenizer.pad_token_id is None:
raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
_lowerCAmelCase : str = ([prefix + arg for arg in args[0]],)
_lowerCAmelCase : Tuple = True
elif isinstance(args[0], __a):
_lowerCAmelCase : List[Any] = (prefix + args[0],)
_lowerCAmelCase : List[str] = False
else:
            raise ValueError(
                f"`args[0]`: {args[0]} has the wrong format. It should be either of type `str` or of type `list`.")
_lowerCAmelCase : List[str] = self.tokenizer(*__a, padding=__a, truncation=__a, return_tensors=self.framework)
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self, *__a, **__a):
'''simple docstring'''
_lowerCAmelCase : int = super().__call__(*__a, **__a)
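        # if a batch of single-string inputs each produced exactly one result, flatten the nesting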
if (
isinstance(args[0], __a)
and all(isinstance(__a, __a) for el in args[0])
and all(len(__a) == 1 for res in result)
):
return [res[0] for res in result]
return result
def snake_case__ ( self, __a, __a=TruncationStrategy.DO_NOT_TRUNCATE, **__a):
'''simple docstring'''
_lowerCAmelCase : Dict = self._parse_and_tokenize(__a, truncation=__a, **__a)
return inputs
def snake_case__ ( self, __a, **__a):
'''simple docstring'''
if self.framework == "pt":
_lowerCAmelCase , _lowerCAmelCase : List[str] = model_inputs["input_ids"].shape
elif self.framework == "tf":
_lowerCAmelCase , _lowerCAmelCase : int = tf.shape(model_inputs["input_ids"]).numpy()
_lowerCAmelCase : Tuple = generate_kwargs.get("min_length", self.model.config.min_length)
_lowerCAmelCase : Union[str, Any] = generate_kwargs.get("max_length", self.model.config.max_length)
self.check_inputs(__a, generate_kwargs["min_length"], generate_kwargs["max_length"])
_lowerCAmelCase : Union[str, Any] = self.model.generate(**__a, **__a)
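        # regroup the flat batch of generations so each input maps to its out_b // in_b candidate sequences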
_lowerCAmelCase : int = output_ids.shape[0]
if self.framework == "pt":
_lowerCAmelCase : Tuple = output_ids.reshape(__a, out_b // in_b, *output_ids.shape[1:])
elif self.framework == "tf":
_lowerCAmelCase : str = tf.reshape(__a, (in_b, out_b // in_b, *output_ids.shape[1:]))
return {"output_ids": output_ids}
def snake_case__ ( self, __a, __a=ReturnType.TEXT, __a=False):
'''simple docstring'''
_lowerCAmelCase : Dict = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
_lowerCAmelCase : Union[str, Any] = {f"{self.return_name}_token_ids": output_ids}
elif return_type == ReturnType.TEXT:
_lowerCAmelCase : Union[str, Any] = {
f"{self.return_name}_text": self.tokenizer.decode(
__a, skip_special_tokens=__a, clean_up_tokenization_spaces=__a, )
}
records.append(__a)
return records
@add_end_docstrings(a)
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'summary'
def __call__( self, *__a, **__a):
'''simple docstring'''
return super().__call__(*__a, **__a)
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
if max_length < min_length:
            logger.warning(f"Your min_length={min_length} must be smaller than your max_length={max_length}.")
if input_length < max_length:
logger.warning(
f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
"a summarization task, where outputs shorter than the input are typically wanted, you might "
f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})")
@add_end_docstrings(a)
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'translation'
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
if input_length > 0.9 * max_length:
logger.warning(
f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
"increasing your max_length manually, e.g. translator('...', max_length=400)")
return True
def snake_case__ ( self, *__a, __a=TruncationStrategy.DO_NOT_TRUNCATE, __a=None, __a=None):
'''simple docstring'''
if getattr(self.tokenizer, "_build_translation_inputs", __a):
return self.tokenizer._build_translation_inputs(
*__a, return_tensors=self.framework, truncation=__a, src_lang=__a, tgt_lang=__a)
else:
return super()._parse_and_tokenize(*__a, truncation=__a)
def snake_case__ ( self, __a=None, __a=None, **__a):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : int = super()._sanitize_parameters(**__a)
if src_lang is not None:
_lowerCAmelCase : int = src_lang
if tgt_lang is not None:
_lowerCAmelCase : str = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
_lowerCAmelCase : Optional[int] = kwargs.get("task", self.task)
_lowerCAmelCase : Union[str, Any] = task.split("_")
if task and len(__a) == 4:
# translation, XX, to YY
_lowerCAmelCase : Optional[int] = items[1]
_lowerCAmelCase : Optional[int] = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self, *__a, **__a):
'''simple docstring'''
return super().__call__(*__a, **__a)
| 300
|
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class UpperCAmelCase_ ( a , a , unittest.TestCase):
lowerCamelCase__ = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCamelCase__ = (
{
'feature-extraction': TFMobileBertModel,
'fill-mask': TFMobileBertForMaskedLM,
'question-answering': TFMobileBertForQuestionAnswering,
'text-classification': TFMobileBertForSequenceClassification,
'token-classification': TFMobileBertForTokenClassification,
'zero-shot': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
def snake_case__ ( self, __a, __a, __a=False):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = super()._prepare_for_class(__a, __a, return_labels=__a)
if return_labels:
if model_class in get_values(__a):
_lowerCAmelCase : Tuple = tf.zeros(self.model_tester.batch_size, dtype=tf.intaa)
return inputs_dict
class UpperCAmelCase_ ( a):
def __init__( self, __a, __a=13, __a=7, __a=True, __a=True, __a=True, __a=True, __a=99, __a=32, __a=32, __a=2, __a=4, __a=37, __a="gelu", __a=0.1, __a=0.1, __a=512, __a=16, __a=2, __a=0.02, __a=3, __a=4, __a=None, ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = parent
_lowerCAmelCase : Dict = batch_size
_lowerCAmelCase : str = seq_length
_lowerCAmelCase : int = is_training
_lowerCAmelCase : List[Any] = use_input_mask
_lowerCAmelCase : Optional[Any] = use_token_type_ids
_lowerCAmelCase : Union[str, Any] = use_labels
_lowerCAmelCase : int = vocab_size
_lowerCAmelCase : int = hidden_size
_lowerCAmelCase : Optional[int] = num_hidden_layers
_lowerCAmelCase : Tuple = num_attention_heads
_lowerCAmelCase : Dict = intermediate_size
_lowerCAmelCase : Tuple = hidden_act
_lowerCAmelCase : Any = hidden_dropout_prob
_lowerCAmelCase : Any = attention_probs_dropout_prob
_lowerCAmelCase : List[Any] = max_position_embeddings
_lowerCAmelCase : Any = type_vocab_size
_lowerCAmelCase : List[Any] = type_sequence_label_size
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : List[str] = num_labels
_lowerCAmelCase : List[Any] = num_choices
_lowerCAmelCase : str = scope
_lowerCAmelCase : Union[str, Any] = embedding_size
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
_lowerCAmelCase : str = None
if self.use_input_mask:
_lowerCAmelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length])
_lowerCAmelCase : List[str] = None
if self.use_token_type_ids:
_lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : Optional[int] = None
if self.use_labels:
_lowerCAmelCase : int = ids_tensor([self.batch_size], self.type_sequence_label_size)
_lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
_lowerCAmelCase : str = ids_tensor([self.batch_size], self.num_choices)
_lowerCAmelCase : Optional[Any] = MobileBertConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, embedding_size=self.embedding_size, )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : str = TFMobileBertModel(config=__a)
_lowerCAmelCase : List[str] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : Any = model(__a)
_lowerCAmelCase : Optional[Any] = [input_ids, input_mask]
_lowerCAmelCase : List[Any] = model(__a)
_lowerCAmelCase : Any = model(__a)
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : int = TFMobileBertForMaskedLM(config=__a)
_lowerCAmelCase : List[str] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : List[Any] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : str = TFMobileBertForNextSentencePrediction(config=__a)
_lowerCAmelCase : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : List[str] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = TFMobileBertForPreTraining(config=__a)
_lowerCAmelCase : Any = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : Optional[Any] = model(__a)
self.parent.assertEqual(
result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Dict = self.num_labels
_lowerCAmelCase : Optional[Any] = TFMobileBertForSequenceClassification(config=__a)
_lowerCAmelCase : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : Optional[Any] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.num_choices
_lowerCAmelCase : List[Any] = TFMobileBertForMultipleChoice(config=__a)
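        # repeat each input num_choices times along a new choice axis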
_lowerCAmelCase : Dict = tf.tile(tf.expand_dims(__a, 1), (1, self.num_choices, 1))
_lowerCAmelCase : List[str] = tf.tile(tf.expand_dims(__a, 1), (1, self.num_choices, 1))
_lowerCAmelCase : Optional[int] = tf.tile(tf.expand_dims(__a, 1), (1, self.num_choices, 1))
_lowerCAmelCase : Optional[Any] = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
_lowerCAmelCase : List[str] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.num_labels
_lowerCAmelCase : Union[str, Any] = TFMobileBertForTokenClassification(config=__a)
_lowerCAmelCase : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : Union[str, Any] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : int = TFMobileBertForQuestionAnswering(config=__a)
_lowerCAmelCase : Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : Union[str, Any] = model(__a)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
        _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = config_and_inputs
_lowerCAmelCase : List[str] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = TFMobileBertModelTest.TFMobileBertModelTester(self)
_lowerCAmelCase : List[Any] = ConfigTester(self, config_class=__a, hidden_size=37)
def snake_case__ ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*__a)
@slow
def snake_case__ ( self):
'''simple docstring'''
for model_name in ["google/mobilebert-uncased"]:
_lowerCAmelCase : List[Any] = TFMobileBertModel.from_pretrained(__a)
self.assertIsNotNone(__a)
@require_tf
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
_lowerCAmelCase : Any = tf.constant([[0, 1, 2, 3, 4, 5]])
_lowerCAmelCase : Tuple = model(__a)[0]
_lowerCAmelCase : Union[str, Any] = [1, 6, 3_0522]
self.assertEqual(output.shape, __a)
_lowerCAmelCase : Tuple = tf.constant(
[
[
[-4.5_919_547, -9.248_295, -9.645_256],
[-6.7_306_175, -6.440_284, -6.6_052_837],
[-7.2_743_506, -6.7_847_915, -6.024_673],
]
])
tf.debugging.assert_near(output[:, :3, :3], __a, atol=1E-4)
| 300
| 1
|
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
_snake_case = logging.getLogger(__name__)
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'sequence-classification'
def __init__( self, __a):
'''simple docstring'''
if type(__a) == dict:
_lowerCAmelCase : Union[str, Any] = Namespace(**__a)
_lowerCAmelCase : str = glue_output_modes[hparams.task]
_lowerCAmelCase : Tuple = glue_tasks_num_labels[hparams.task]
super().__init__(__a, __a, self.mode)
def snake_case__ ( self, **__a):
'''simple docstring'''
return self.model(**__a)
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : str = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
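        # DistilBERT and BART do not use token type ids (segment ids)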
if self.config.model_type not in ["distilbert", "bart"]:
_lowerCAmelCase : Dict = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
_lowerCAmelCase : str = self(**__a)
_lowerCAmelCase : Any = outputs[0]
_lowerCAmelCase : Tuple = self.trainer.lr_schedulers[0]["scheduler"]
_lowerCAmelCase : List[Any] = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.hparams
_lowerCAmelCase : Any = processors[args.task]()
_lowerCAmelCase : List[str] = processor.get_labels()
for mode in ["train", "dev"]:
_lowerCAmelCase : Optional[int] = self._feature_file(__a)
if os.path.exists(__a) and not args.overwrite_cache:
logger.info("Loading features from cached file %s", __a)
else:
logger.info("Creating features from dataset file at %s", args.data_dir)
_lowerCAmelCase : Optional[int] = (
processor.get_dev_examples(args.data_dir)
if mode == "dev"
else processor.get_train_examples(args.data_dir)
)
_lowerCAmelCase : Union[str, Any] = convert_examples_to_features(
__a, self.tokenizer, max_length=args.max_seq_length, label_list=self.labels, output_mode=args.glue_output_mode, )
logger.info("Saving features into cached file %s", __a)
torch.save(__a, __a)
def snake_case__ ( self, __a, __a, __a = False):
'''simple docstring'''
_lowerCAmelCase : Any = "dev" if mode == "test" else mode
_lowerCAmelCase : Optional[int] = self._feature_file(__a)
logger.info("Loading features from cached file %s", __a)
_lowerCAmelCase : int = torch.load(__a)
_lowerCAmelCase : int = torch.tensor([f.input_ids for f in features], dtype=torch.long)
_lowerCAmelCase : List[str] = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
_lowerCAmelCase : List[str] = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
if self.hparams.glue_output_mode == "classification":
_lowerCAmelCase : Optional[Any] = torch.tensor([f.label for f in features], dtype=torch.long)
elif self.hparams.glue_output_mode == "regression":
_lowerCAmelCase : Tuple = torch.tensor([f.label for f in features], dtype=torch.float)
return DataLoader(
TensorDataset(__a, __a, __a, __a), batch_size=__a, shuffle=__a, )
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Any = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_lowerCAmelCase : Optional[int] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
_lowerCAmelCase : Any = self(**__a)
_lowerCAmelCase , _lowerCAmelCase : Tuple = outputs[:2]
_lowerCAmelCase : List[str] = logits.detach().cpu().numpy()
_lowerCAmelCase : Any = inputs["labels"].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : int = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
_lowerCAmelCase : Union[str, Any] = np.concatenate([x["pred"] for x in outputs], axis=0)
if self.hparams.glue_output_mode == "classification":
_lowerCAmelCase : Dict = np.argmax(__a, axis=1)
elif self.hparams.glue_output_mode == "regression":
_lowerCAmelCase : Dict = np.squeeze(__a)
_lowerCAmelCase : Any = np.concatenate([x["target"] for x in outputs], axis=0)
_lowerCAmelCase : List[str] = [[] for _ in range(out_label_ids.shape[0])]
_lowerCAmelCase : Optional[int] = [[] for _ in range(out_label_ids.shape[0])]
_lowerCAmelCase : Dict = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, __a, __a)}
_lowerCAmelCase : List[Any] = dict(results.items())
_lowerCAmelCase : Dict = results
return ret, preds_list, out_label_list
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : int = self._eval_end(__a)
_lowerCAmelCase : int = ret["log"]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Dict = self._eval_end(__a)
_lowerCAmelCase : str = ret["log"]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def snake_case__ ( __a, __a):
'''simple docstring'''
BaseTransformer.add_model_specific_args(__a, __a)
parser.add_argument(
"--max_seq_length", default=128, type=__a, help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
), )
parser.add_argument(
"--task", default="", type=__a, required=__a, help="The GLUE task to run", )
        parser.add_argument(
            "--gpus", default=0, type=__a, help="The number of GPUs allocated for this; defaults to 0, meaning none", )
parser.add_argument(
"--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
return parser
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
add_generic_args(_lowerCamelCase , os.getcwd() )
_lowerCAmelCase : Dict = GLUETransformer.add_model_specific_args(_lowerCamelCase , os.getcwd() )
_lowerCAmelCase : List[str] = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
_lowerCAmelCase : str = os.path.join(
"./results" , F"{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}" , )
os.makedirs(args.output_dir )
_lowerCAmelCase : List[Any] = GLUETransformer(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = generic_train(_lowerCamelCase , _lowerCamelCase )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
_lowerCAmelCase : List[str] = sorted(glob.glob(os.path.join(args.output_dir , "checkpoint-epoch=*.ckpt" ) , recursive=_lowerCamelCase ) )
_lowerCAmelCase : List[Any] = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(_lowerCamelCase )
if __name__ == "__main__":
main()
| 300
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
_snake_case = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(a)
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'rag'
lowerCamelCase__ = True
def __init__( self, __a=None, __a=True, __a=None, __a=None, __a=None, __a=None, __a=None, __a=" / ", __a=" // ", __a=5, __a=300, __a=768, __a=8, __a="wiki_dpr", __a="train", __a="compressed", __a=None, __a=None, __a=False, __a=False, __a=0.0, __a=True, __a=False, __a=False, __a=False, __a=True, __a=None, **__a, ):
'''simple docstring'''
super().__init__(
bos_token_id=__a, pad_token_id=__a, eos_token_id=__a, decoder_start_token_id=__a, forced_eos_token_id=__a, is_encoder_decoder=__a, prefix=__a, vocab_size=__a, **__a, )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
_lowerCAmelCase : List[str] = kwargs.pop("question_encoder")
_lowerCAmelCase : Union[str, Any] = question_encoder_config.pop("model_type")
_lowerCAmelCase : int = kwargs.pop("generator")
_lowerCAmelCase : Optional[Any] = decoder_config.pop("model_type")
from ..auto.configuration_auto import AutoConfig
_lowerCAmelCase : int = AutoConfig.for_model(__a, **__a)
_lowerCAmelCase : Tuple = AutoConfig.for_model(__a, **__a)
_lowerCAmelCase : List[Any] = reduce_loss
_lowerCAmelCase : Any = label_smoothing
_lowerCAmelCase : Optional[int] = exclude_bos_score
_lowerCAmelCase : Optional[Any] = do_marginalize
_lowerCAmelCase : Any = title_sep
_lowerCAmelCase : Any = doc_sep
_lowerCAmelCase : Optional[int] = n_docs
_lowerCAmelCase : Optional[Any] = max_combined_length
_lowerCAmelCase : List[str] = dataset
_lowerCAmelCase : List[str] = dataset_split
_lowerCAmelCase : Optional[Any] = index_name
_lowerCAmelCase : Dict = retrieval_vector_size
_lowerCAmelCase : Union[str, Any] = retrieval_batch_size
_lowerCAmelCase : Optional[int] = passages_path
_lowerCAmelCase : Dict = index_path
_lowerCAmelCase : Tuple = use_dummy_dataset
_lowerCAmelCase : Union[str, Any] = output_retrieved
_lowerCAmelCase : str = do_deduplication
_lowerCAmelCase : Union[str, Any] = use_cache
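        # fall back to the generator's forced_eos_token_id when none is set on this config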
if self.forced_eos_token_id is None:
_lowerCAmelCase : Tuple = getattr(self.generator, "forced_eos_token_id", __a)
@classmethod
def snake_case__ ( cls, __a, __a, **__a):
'''simple docstring'''
return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = copy.deepcopy(self.__dict__)
_lowerCAmelCase : Union[str, Any] = self.question_encoder.to_dict()
_lowerCAmelCase : Any = self.generator.to_dict()
_lowerCAmelCase : Optional[Any] = self.__class__.model_type
return output
| 300
| 1
|
from __future__ import annotations
from math import pi, sqrt
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
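    # resonant frequency of an ideal LC circuit: f = 1 / (2 * pi * sqrt(L * C))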
if inductance <= 0:
raise ValueError("Inductance cannot be 0 or negative" )
elif capacitance <= 0:
raise ValueError("Capacitance cannot be 0 or negative" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 300
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class UpperCAmelCase_ ( a):
@staticmethod
@abstractmethod
def snake_case__ ( __a):
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def snake_case__ ( self):
'''simple docstring'''
raise NotImplementedError()
| 300
| 1
|
# Lint as: python3
import itertools
import os
import re
_snake_case = re.compile(R"([A-Z]+)([A-Z][a-z])")
_snake_case = re.compile(R"([a-z\d])([A-Z])")
_snake_case = re.compile(R"(?<!_)_(?!_)")
_snake_case = re.compile(R"(_{2,})")
_snake_case = R"^\w+(\.\w+)*$"
_snake_case = R"<>:/\|?*"
def A ( _lowerCamelCase ):
'''simple docstring'''
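    # insert an underscore at each camelCase boundary, then lowercase the result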
_lowerCAmelCase : Any = _uppercase_uppercase_re.sub(r"\1_\2" , _lowerCamelCase )
_lowerCAmelCase : List[str] = _lowercase_uppercase_re.sub(r"\1_\2" , _lowerCamelCase )
return name.lower()
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : str = _single_underscore_re.split(_lowerCamelCase )
_lowerCAmelCase : Tuple = [_multiple_underscores_re.split(_lowerCamelCase ) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(_lowerCamelCase ) if n != "" )
def A ( _lowerCamelCase ):
'''simple docstring'''
if os.path.basename(_lowerCamelCase ) != name:
raise ValueError(F"Should be a dataset name, not a path: {name}" )
return camelcase_to_snakecase(_lowerCamelCase )
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if os.path.basename(_lowerCamelCase ) != name:
raise ValueError(F"Should be a dataset name, not a path: {name}" )
if not re.match(_split_re , _lowerCamelCase ):
        raise ValueError(F"Split name should match '{_split_re}' but got '{split}'." )
return F"{filename_prefix_for_name(_lowerCamelCase )}-{split}"
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ):
'''simple docstring'''
_lowerCAmelCase : int = filename_prefix_for_split(_lowerCamelCase , _lowerCamelCase )
if filetype_suffix:
prefix += F".{filetype_suffix}"
_lowerCAmelCase : Optional[Any] = os.path.join(_lowerCamelCase , _lowerCamelCase )
return F"{filepath}*"
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = filename_prefix_for_split(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Optional[Any] = os.path.join(_lowerCamelCase , _lowerCamelCase )
if shard_lengths:
_lowerCAmelCase : List[Any] = len(_lowerCamelCase )
_lowerCAmelCase : List[Any] = [F"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(_lowerCamelCase )]
if filetype_suffix:
_lowerCAmelCase : Optional[Any] = [filename + F".{filetype_suffix}" for filename in filenames]
return filenames
else:
_lowerCAmelCase : Any = prefix
if filetype_suffix:
filename += F".{filetype_suffix}"
return [filename]
| 300
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def A ( _lowerCamelCase , _lowerCamelCase=False ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_lowerCAmelCase : str = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
_lowerCAmelCase : int = ""
else:
_lowerCAmelCase : Union[str, Any] = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowerCAmelCase : Dict = state_dict.pop(F"blocks.{i}.attn.qkv.weight" )
_lowerCAmelCase : Any = state_dict.pop(F"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
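        # the stacked qkv matrix has shape (3 * hidden_size, hidden_size); slice it into equal thirds for q, k, v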
_lowerCAmelCase : Dict = in_proj_weight[
: config.hidden_size, :
]
_lowerCAmelCase : List[str] = in_proj_bias[: config.hidden_size]
_lowerCAmelCase : Union[str, Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowerCAmelCase : int = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowerCAmelCase : int = in_proj_weight[
-config.hidden_size :, :
]
_lowerCAmelCase : Optional[int] = in_proj_bias[-config.hidden_size :]
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = dct.pop(_lowerCamelCase )
_lowerCAmelCase : Tuple = val
def A ( ):
'''simple docstring'''
_lowerCAmelCase : int = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCAmelCase : List[str] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return im
@torch.no_grad()
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = ViTConfig()
_lowerCAmelCase : str = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
_lowerCAmelCase : str = True
_lowerCAmelCase : List[str] = int(vit_name[-12:-10] )
_lowerCAmelCase : str = int(vit_name[-9:-6] )
else:
_lowerCAmelCase : List[str] = 1_000
_lowerCAmelCase : int = "huggingface/label-files"
_lowerCAmelCase : Dict = "imagenet-1k-id2label.json"
_lowerCAmelCase : Dict = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) )
_lowerCAmelCase : List[str] = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
_lowerCAmelCase : Optional[int] = idalabel
_lowerCAmelCase : Dict = {v: k for k, v in idalabel.items()}
_lowerCAmelCase : str = int(vit_name[-6:-4] )
_lowerCAmelCase : List[str] = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith("tiny" ):
_lowerCAmelCase : str = 192
_lowerCAmelCase : Union[str, Any] = 768
_lowerCAmelCase : str = 12
_lowerCAmelCase : Any = 3
elif vit_name[9:].startswith("small" ):
_lowerCAmelCase : Any = 384
_lowerCAmelCase : Any = 1_536
_lowerCAmelCase : List[str] = 12
_lowerCAmelCase : Tuple = 6
else:
pass
else:
if vit_name[4:].startswith("small" ):
_lowerCAmelCase : Optional[Any] = 768
_lowerCAmelCase : str = 2_304
_lowerCAmelCase : Optional[int] = 8
_lowerCAmelCase : List[str] = 8
elif vit_name[4:].startswith("base" ):
pass
elif vit_name[4:].startswith("large" ):
_lowerCAmelCase : Optional[Any] = 1_024
_lowerCAmelCase : List[str] = 4_096
_lowerCAmelCase : Dict = 24
_lowerCAmelCase : int = 16
elif vit_name[4:].startswith("huge" ):
_lowerCAmelCase : Union[str, Any] = 1_280
_lowerCAmelCase : Optional[int] = 5_120
_lowerCAmelCase : Optional[Any] = 32
_lowerCAmelCase : str = 16
# load original model from timm
_lowerCAmelCase : List[Any] = timm.create_model(_lowerCamelCase , pretrained=_lowerCamelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
_lowerCAmelCase : List[str] = timm_model.state_dict()
if base_model:
remove_classification_head_(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = create_rename_keys(_lowerCamelCase , _lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
read_in_q_k_v(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# load HuggingFace model
if vit_name[-5:] == "in21k":
_lowerCAmelCase : Optional[int] = ViTModel(_lowerCamelCase ).eval()
else:
_lowerCAmelCase : Optional[int] = ViTForImageClassification(_lowerCamelCase ).eval()
model.load_state_dict(_lowerCamelCase )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
_lowerCAmelCase : Tuple = DeiTImageProcessor(size=config.image_size )
else:
_lowerCAmelCase : Dict = ViTImageProcessor(size=config.image_size )
_lowerCAmelCase : Optional[int] = image_processor(images=prepare_img() , return_tensors="pt" )
_lowerCAmelCase : Union[str, Any] = encoding["pixel_values"]
_lowerCAmelCase : List[str] = model(_lowerCamelCase )
if base_model:
_lowerCAmelCase : List[str] = timm_model.forward_features(_lowerCamelCase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(_lowerCamelCase , outputs.pooler_output , atol=1e-3 )
else:
_lowerCAmelCase : Any = timm_model(_lowerCamelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_lowerCamelCase , outputs.logits , atol=1e-3 )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(F"Saving model {vit_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCamelCase )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_patch16_224",
type=str,
help="Name of the ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
_snake_case = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
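    # Example invocation (sketch; the script filename and output path below are
    # illustrative, not taken from the source):
    #   python convert_vit_timm_to_pytorch.py \
    #       --vit_name vit_base_patch16_224 \
    #       --pytorch_dump_folder_path ./converted-vit-base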
| 300
| 1
|
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = len(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = []
for i in range(len(_lowerCamelCase ) - pat_len + 1 ):
_lowerCAmelCase : Optional[int] = True
for j in range(_lowerCamelCase ):
if s[i + j] != pattern[j]:
_lowerCAmelCase : Any = False
break
if match_found:
position.append(_lowerCamelCase )
return position
if __name__ == "__main__":
assert naive_pattern_search("ABCDEFG", "DE") == [3]
print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
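    # Complexity note (sketch): every alignment of the pattern is tried, so the
    # worst case is O((n - m + 1) * m) character comparisons for a text of
    # length n and a pattern of length m. Overlapping matches are all reported:
    assert naive_pattern_search("AAAAA", "AA") == [0, 1, 2, 3]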
| 300
|
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( a):
def __init__( self, *__a, **__a):
'''simple docstring'''
warnings.warn(
"The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use VideoMAEImageProcessor instead.", __a, )
super().__init__(*__a, **__a)
| 300
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'gpt_neox'
def __init__( self, __a=5_0432, __a=6144, __a=44, __a=64, __a=2_4576, __a="gelu", __a=0.25, __a=1_0000, __a=0.0, __a=0.0, __a=0.1, __a=2048, __a=0.02, __a=1E-5, __a=True, __a=0, __a=2, __a=False, __a=True, __a=None, **__a, ):
'''simple docstring'''
super().__init__(bos_token_id=__a, eos_token_id=__a, **__a)
_lowerCAmelCase : Tuple = vocab_size
_lowerCAmelCase : int = max_position_embeddings
_lowerCAmelCase : Dict = hidden_size
_lowerCAmelCase : Optional[int] = num_hidden_layers
_lowerCAmelCase : List[str] = num_attention_heads
_lowerCAmelCase : Optional[int] = intermediate_size
_lowerCAmelCase : List[Any] = hidden_act
_lowerCAmelCase : List[Any] = rotary_pct
_lowerCAmelCase : List[Any] = rotary_emb_base
_lowerCAmelCase : Tuple = attention_dropout
_lowerCAmelCase : Any = hidden_dropout
_lowerCAmelCase : int = classifier_dropout
_lowerCAmelCase : List[Any] = initializer_range
_lowerCAmelCase : Dict = layer_norm_eps
_lowerCAmelCase : int = use_cache
_lowerCAmelCase : Optional[int] = tie_word_embeddings
_lowerCAmelCase : Union[str, Any] = use_parallel_residual
_lowerCAmelCase : Union[str, Any] = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
"The hidden size is not divisble by the number of attention heads! Make sure to update them!")
def snake_case__ ( self):
'''simple docstring'''
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling, __a) or len(self.rope_scaling) != 2:
raise ValueError(
"`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
f"got {self.rope_scaling}")
_lowerCAmelCase : List[str] = self.rope_scaling.get("type", __a)
_lowerCAmelCase : Union[str, Any] = self.rope_scaling.get("factor", __a)
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
if rope_scaling_factor is None or not isinstance(__a, __a) or rope_scaling_factor <= 1.0:
raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
| 300
|
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase):
@property
def snake_case__ ( self):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = ort.SessionOptions()
_lowerCAmelCase : int = False
return options
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png")
_lowerCAmelCase : List[str] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png")
_lowerCAmelCase : List[str] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy")
# using the PNDM scheduler by default
_lowerCAmelCase : Optional[int] = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
"CompVis/stable-diffusion-v1-4", revision="onnx", safety_checker=__a, feature_extractor=__a, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=__a)
_lowerCAmelCase : Any = "A red cat sitting on a park bench"
_lowerCAmelCase : Optional[Any] = np.random.RandomState(0)
_lowerCAmelCase : Any = pipe(
prompt=__a, image=__a, mask_image=__a, strength=0.75, guidance_scale=7.5, num_inference_steps=15, generator=__a, output_type="np", )
_lowerCAmelCase : Optional[int] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 1E-2
| 300
| 1
|
from __future__ import annotations
import os
from typing import Any
import requests
_snake_case = "https://api.github.com"
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
_snake_case = BASE_URL + "/user"
# https://github.com/settings/tokens
_snake_case = os.environ.get("USER_TOKEN", "")
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = {
"Authorization": F"token {auth_token}",
"Accept": "application/vnd.github.v3+json",
}
return requests.get(_lowerCamelCase , headers=_lowerCamelCase ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f'''{key}: {value}''')
else:
raise ValueError("'USER_TOKEN' field cannot be empty.")
| 300
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = tempfile.mkdtemp()
# fmt: off
_lowerCAmelCase : Optional[Any] = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
_lowerCAmelCase : Optional[Any] = dict(zip(__a, range(len(__a))))
_lowerCAmelCase : int = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
_lowerCAmelCase : Optional[Any] = {"unk_token": "<unk>"}
_lowerCAmelCase : Any = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
_lowerCAmelCase : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file, "w", encoding="utf-8") as fp:
fp.write(json.dumps(__a) + "\n")
with open(self.merges_file, "w", encoding="utf-8") as fp:
fp.write("\n".join(__a))
_lowerCAmelCase : List[str] = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
_lowerCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname, __a)
with open(self.image_processor_file, "w", encoding="utf-8") as fp:
json.dump(__a, __a)
def snake_case__ ( self, **__a):
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname, **__a)
def snake_case__ ( self, **__a):
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **__a)
def snake_case__ ( self, **__a):
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname, **__a)
def snake_case__ ( self):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def snake_case__ ( self):
'''simple docstring'''
        _lowerCAmelCase : Tuple = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        _lowerCAmelCase : Optional[int] = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
return image_inputs
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.get_tokenizer()
_lowerCAmelCase : Optional[int] = self.get_rust_tokenizer()
_lowerCAmelCase : Dict = self.get_image_processor()
_lowerCAmelCase : Any = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
processor_slow.save_pretrained(self.tmpdirname)
_lowerCAmelCase : Tuple = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=__a)
_lowerCAmelCase : str = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
processor_fast.save_pretrained(self.tmpdirname)
_lowerCAmelCase : Any = CLIPSegProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer, __a)
self.assertIsInstance(processor_fast.tokenizer, __a)
self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor, __a)
self.assertIsInstance(processor_fast.image_processor, __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
_lowerCAmelCase : Any = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
_lowerCAmelCase : Tuple = self.get_image_processor(do_normalize=__a, padding_value=1.0)
_lowerCAmelCase : Union[str, Any] = CLIPSegProcessor.from_pretrained(
self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=__a, padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer, __a)
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor, __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.get_image_processor()
_lowerCAmelCase : Dict = self.get_tokenizer()
_lowerCAmelCase : Union[str, Any] = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : List[str] = self.prepare_image_inputs()
_lowerCAmelCase : List[str] = image_processor(__a, return_tensors="np")
_lowerCAmelCase : Optional[Any] = processor(images=__a, return_tensors="np")
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.get_image_processor()
_lowerCAmelCase : Tuple = self.get_tokenizer()
_lowerCAmelCase : Dict = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : Union[str, Any] = "lower newer"
_lowerCAmelCase : List[str] = processor(text=__a)
_lowerCAmelCase : List[Any] = tokenizer(__a)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.get_image_processor()
_lowerCAmelCase : Any = self.get_tokenizer()
_lowerCAmelCase : Dict = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : int = "lower newer"
_lowerCAmelCase : List[Any] = self.prepare_image_inputs()
_lowerCAmelCase : Any = processor(text=__a, images=__a)
self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])
# test if it raises when no input is passed
with pytest.raises(__a):
processor()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.get_image_processor()
_lowerCAmelCase : int = self.get_tokenizer()
_lowerCAmelCase : Any = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : Dict = self.prepare_image_inputs()
_lowerCAmelCase : Optional[Any] = self.prepare_image_inputs()
_lowerCAmelCase : Any = processor(images=__a, visual_prompt=__a)
self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])
# test if it raises when no input is passed
with pytest.raises(__a):
processor()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = self.get_image_processor()
_lowerCAmelCase : Any = self.get_tokenizer()
_lowerCAmelCase : Any = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCAmelCase : List[str] = processor.batch_decode(__a)
_lowerCAmelCase : List[Any] = tokenizer.batch_decode(__a)
self.assertListEqual(__a, __a)
| 300
| 1
|
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class UpperCAmelCase_ ( a):
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = SMALL_MODEL_IDENTIFIER
_lowerCAmelCase : Union[str, Any] = "pt"
_lowerCAmelCase : List[Any] = "tf"
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Any = AutoModel.from_pretrained(self.test_model)
model_pt.save_pretrained(__a)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = TFAutoModel.from_pretrained(self.test_model, from_pt=__a)
model_tf.save_pretrained(__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = "mock_framework"
# Framework provided - return whatever the user provides
_lowerCAmelCase : List[str] = FeaturesManager.determine_framework(self.test_model, __a)
self.assertEqual(__a, __a)
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(__a)
_lowerCAmelCase : Union[str, Any] = FeaturesManager.determine_framework(__a, __a)
self.assertEqual(__a, __a)
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(__a)
_lowerCAmelCase : int = FeaturesManager.determine_framework(__a, __a)
self.assertEqual(__a, __a)
def snake_case__ ( self):
'''simple docstring'''
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(__a)
_lowerCAmelCase : Tuple = FeaturesManager.determine_framework(__a)
self.assertEqual(__a, self.framework_pt)
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(__a)
_lowerCAmelCase : List[Any] = FeaturesManager.determine_framework(__a)
self.assertEqual(__a, self.framework_tf)
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(__a):
_lowerCAmelCase : Dict = FeaturesManager.determine_framework(__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = MagicMock(return_value=__a)
with patch("transformers.onnx.features.is_tf_available", __a):
_lowerCAmelCase : str = FeaturesManager.determine_framework(self.test_model)
self.assertEqual(__a, self.framework_pt)
# PyTorch not in environment -> use TensorFlow
_lowerCAmelCase : Tuple = MagicMock(return_value=__a)
with patch("transformers.onnx.features.is_torch_available", __a):
_lowerCAmelCase : int = FeaturesManager.determine_framework(self.test_model)
self.assertEqual(__a, self.framework_tf)
# Both in environment -> use PyTorch
_lowerCAmelCase : Union[str, Any] = MagicMock(return_value=__a)
_lowerCAmelCase : str = MagicMock(return_value=__a)
with patch("transformers.onnx.features.is_tf_available", __a), patch(
"transformers.onnx.features.is_torch_available", __a):
_lowerCAmelCase : str = FeaturesManager.determine_framework(self.test_model)
self.assertEqual(__a, self.framework_pt)
# Both not in environment -> raise error
_lowerCAmelCase : Optional[int] = MagicMock(return_value=__a)
_lowerCAmelCase : str = MagicMock(return_value=__a)
with patch("transformers.onnx.features.is_tf_available", __a), patch(
"transformers.onnx.features.is_torch_available", __a):
with self.assertRaises(__a):
_lowerCAmelCase : Dict = FeaturesManager.determine_framework(self.test_model)
| 300
|
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_snake_case = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class UpperCAmelCase_ ( unittest.TestCase):
lowerCamelCase__ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
lowerCamelCase__ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
lowerCamelCase__ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
lowerCamelCase__ = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : str = ZeroShotClassificationPipeline(
model=__a, tokenizer=__a, candidate_labels=["polics", "health"])
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[Any] = classifier("Who are you voting for in 2020?", candidate_labels="politics")
self.assertEqual(__a, {"sequence": ANY(__a), "labels": [ANY(__a)], "scores": [ANY(__a)]})
# No kwarg
_lowerCAmelCase : int = classifier("Who are you voting for in 2020?", ["politics"])
self.assertEqual(__a, {"sequence": ANY(__a), "labels": [ANY(__a)], "scores": [ANY(__a)]})
_lowerCAmelCase : Tuple = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
self.assertEqual(__a, {"sequence": ANY(__a), "labels": [ANY(__a)], "scores": [ANY(__a)]})
_lowerCAmelCase : List[Any] = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
self.assertEqual(
__a, {"sequence": ANY(__a), "labels": [ANY(__a), ANY(__a)], "scores": [ANY(__a), ANY(__a)]})
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)
_lowerCAmelCase : List[str] = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
self.assertEqual(
__a, {"sequence": ANY(__a), "labels": [ANY(__a), ANY(__a)], "scores": [ANY(__a), ANY(__a)]})
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)
_lowerCAmelCase : List[Any] = classifier(
"Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}")
self.assertEqual(__a, {"sequence": ANY(__a), "labels": [ANY(__a)], "scores": [ANY(__a)]})
# https://github.com/huggingface/transformers/issues/13846
_lowerCAmelCase : Optional[int] = classifier(["I am happy"], ["positive", "negative"])
self.assertEqual(
__a, [
{"sequence": ANY(__a), "labels": [ANY(__a), ANY(__a)], "scores": [ANY(__a), ANY(__a)]}
for i in range(1)
], )
_lowerCAmelCase : Any = classifier(["I am happy", "I am sad"], ["positive", "negative"])
self.assertEqual(
__a, [
{"sequence": ANY(__a), "labels": [ANY(__a), ANY(__a)], "scores": [ANY(__a), ANY(__a)]}
for i in range(2)
], )
with self.assertRaises(__a):
classifier("", candidate_labels="politics")
with self.assertRaises(__a):
classifier(__a, candidate_labels="politics")
with self.assertRaises(__a):
classifier("Who are you voting for in 2020?", candidate_labels="")
with self.assertRaises(__a):
classifier("Who are you voting for in 2020?", candidate_labels=__a)
with self.assertRaises(__a):
classifier(
"Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="Not formatting template", )
with self.assertRaises(__a):
classifier(
"Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template=__a, )
self.run_entailment_id(__a)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Tuple = zero_shot_classifier.model.config
_lowerCAmelCase : Optional[Any] = config.labelaid
_lowerCAmelCase : Union[str, Any] = zero_shot_classifier.entailment_id
_lowerCAmelCase : Any = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
self.assertEqual(zero_shot_classifier.entailment_id, -1)
_lowerCAmelCase : Optional[int] = {"entailment": 0, "neutral": 1, "contradiction": 2}
self.assertEqual(zero_shot_classifier.entailment_id, 0)
_lowerCAmelCase : Optional[int] = {"ENTAIL": 0, "NON-ENTAIL": 1}
self.assertEqual(zero_shot_classifier.entailment_id, 0)
_lowerCAmelCase : Optional[Any] = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
self.assertEqual(zero_shot_classifier.entailment_id, 2)
_lowerCAmelCase : List[str] = original_labelaid
self.assertEqual(__a, zero_shot_classifier.entailment_id)
@require_torch
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = pipeline(
"zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt", )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"Who are you voting for in 2020?" * 100, candidate_labels=["politics", "public health", "science"])
@require_torch
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = pipeline(
"zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt", )
_lowerCAmelCase : List[Any] = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
self.assertEqual(
nested_simplify(__a), {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.333, 0.333, 0.333],
}, )
@require_tf
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = pipeline(
"zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="tf", )
_lowerCAmelCase : Union[str, Any] = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
self.assertEqual(
nested_simplify(__a), {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.333, 0.333, 0.333],
}, )
@slow
@require_torch
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
_lowerCAmelCase : Optional[Any] = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
self.assertEqual(
nested_simplify(__a), {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.976, 0.015, 0.009],
}, )
_lowerCAmelCase : Union[str, Any] = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data.", candidate_labels=["machine learning", "statistics", "translation", "vision"], multi_label=__a, )
self.assertEqual(
nested_simplify(__a), {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.817, 0.713, 0.018, 0.018],
}, )
@slow
@require_tf
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
_lowerCAmelCase : Dict = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
self.assertEqual(
nested_simplify(__a), {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.976, 0.015, 0.009],
}, )
_lowerCAmelCase : str = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data.", candidate_labels=["machine learning", "statistics", "translation", "vision"], multi_label=__a, )
self.assertEqual(
nested_simplify(__a), {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.817, 0.713, 0.018, 0.018],
}, )
| 300
| 1
|
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
_snake_case = logging.get_logger(__name__)
@add_end_docstrings(a)
class UpperCAmelCase_ ( a):
def __init__( self, **__a):
'''simple docstring'''
super().__init__(**__a)
if self.framework != "pt":
raise ValueError(f"The {self.__class__} is only available in PyTorch.")
# No specific FOR_XXX available yet
def __call__( self, __a, **__a):
'''simple docstring'''
return super().__call__(__a, **__a)
def snake_case__ ( self, **__a):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = {}
if "candidate_labels" in kwargs:
_lowerCAmelCase : Tuple = kwargs["candidate_labels"]
if "hypothesis_template" in kwargs:
_lowerCAmelCase : int = kwargs["hypothesis_template"]
return preprocess_params, {}, {}
def snake_case__ ( self, __a, __a=None, __a="This is a sound of {}."):
'''simple docstring'''
if isinstance(__a, __a):
if audio.startswith("http://") or audio.startswith("https://"):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
_lowerCAmelCase : Optional[int] = requests.get(__a).content
else:
with open(__a, "rb") as f:
_lowerCAmelCase : int = f.read()
if isinstance(__a, __a):
_lowerCAmelCase : str = ffmpeg_read(__a, self.feature_extractor.sampling_rate)
if not isinstance(__a, np.ndarray):
raise ValueError("We expect a numpy ndarray as input")
if len(audio.shape) != 1:
raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")
_lowerCAmelCase : Any = self.feature_extractor(
[audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt")
_lowerCAmelCase : List[str] = candidate_labels
        _lowerCAmelCase : Any = [hypothesis_template.format(x) for x in candidate_labels]
_lowerCAmelCase : Optional[int] = self.tokenizer(__a, return_tensors=self.framework, padding=__a)
_lowerCAmelCase : Tuple = [text_inputs]
return inputs
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Any = model_inputs.pop("candidate_labels")
_lowerCAmelCase : Any = model_inputs.pop("text_inputs")
if isinstance(text_inputs[0], __a):
_lowerCAmelCase : List[Any] = text_inputs[0]
else:
# Batching case.
_lowerCAmelCase : List[Any] = text_inputs[0][0]
_lowerCAmelCase : Any = self.model(**__a, **__a)
_lowerCAmelCase : str = {
"candidate_labels": candidate_labels,
"logits": outputs.logits_per_audio,
}
return model_outputs
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Tuple = model_outputs.pop("candidate_labels")
_lowerCAmelCase : str = model_outputs["logits"][0]
if self.framework == "pt":
_lowerCAmelCase : str = logits.softmax(dim=0)
_lowerCAmelCase : Optional[Any] = probs.tolist()
else:
raise ValueError("`tf` framework not supported.")
_lowerCAmelCase : Any = [
{"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(__a, __a), key=lambda x: -x[0])
]
return result
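# Usage sketch (hedged; the checkpoint name, audio file, and labels are
# illustrative, not taken from the source):
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#   classifier("dog_bark.wav", candidate_labels=["dog barking", "vacuum cleaner"])
# The call returns a list of {"score": ..., "label": ...} dicts sorted by
# descending score, as assembled in the postprocessing step above.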
| 300
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 42
lowerCamelCase__ = 42
class UpperCAmelCase_ ( a , a):
lowerCamelCase__ = 1
@register_to_config
def __init__( self, __a = 2000, __a = 0.15, __a = 0.01, __a = 1_348.0, __a = 1E-5, __a = 1, ):
'''simple docstring'''
_lowerCAmelCase : Dict = sigma_max
# setable values
_lowerCAmelCase : str = None
self.set_sigmas(__a, __a, __a, __a)
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
return sample
def snake_case__ ( self, __a, __a = None, __a = None):
'''simple docstring'''
_lowerCAmelCase : int = sampling_eps if sampling_eps is not None else self.config.sampling_eps
_lowerCAmelCase : Dict = torch.linspace(1, __a, __a, device=__a)
def snake_case__ ( self, __a, __a = None, __a = None, __a = None):
'''simple docstring'''
_lowerCAmelCase : List[str] = sigma_min if sigma_min is not None else self.config.sigma_min
_lowerCAmelCase : Tuple = sigma_max if sigma_max is not None else self.config.sigma_max
_lowerCAmelCase : str = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(__a, __a)
_lowerCAmelCase : int = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
_lowerCAmelCase : Any = torch.exp(torch.linspace(math.log(__a), math.log(__a), __a))
_lowerCAmelCase : int = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])
def snake_case__ ( self, __a, __a):
'''simple docstring'''
return torch.where(
timesteps == 0, torch.zeros_like(t.to(timesteps.device)), self.discrete_sigmas[timesteps - 1].to(timesteps.device), )
def snake_case__ ( self, __a, __a, __a, __a = None, __a = True, ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
_lowerCAmelCase : Dict = timestep * torch.ones(
sample.shape[0], device=sample.device) # torch.repeat_interleave(timestep, sample.shape[0])
_lowerCAmelCase : Dict = (timestep * (len(self.timesteps) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
_lowerCAmelCase : Union[str, Any] = timesteps.to(self.discrete_sigmas.device)
_lowerCAmelCase : Any = self.discrete_sigmas[timesteps].to(sample.device)
_lowerCAmelCase : List[Any] = self.get_adjacent_sigma(__a, __a).to(sample.device)
_lowerCAmelCase : List[str] = torch.zeros_like(__a)
_lowerCAmelCase : Union[str, Any] = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
_lowerCAmelCase : Union[str, Any] = diffusion.flatten()
while len(diffusion.shape) < len(sample.shape):
_lowerCAmelCase : Optional[int] = diffusion.unsqueeze(-1)
_lowerCAmelCase : Dict = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
_lowerCAmelCase : Optional[Any] = randn_tensor(
sample.shape, layout=sample.layout, generator=__a, device=sample.device, dtype=sample.dtype)
_lowerCAmelCase : int = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
_lowerCAmelCase : Tuple = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=__a, prev_sample_mean=__a)
def snake_case__ ( self, __a, __a, __a = None, __a = True, ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
_lowerCAmelCase : Union[str, Any] = randn_tensor(sample.shape, layout=sample.layout, generator=__a).to(sample.device)
# compute step size from the model_output, the noise, and the snr
_lowerCAmelCase : Any = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
_lowerCAmelCase : Dict = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
_lowerCAmelCase : Optional[Any] = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
_lowerCAmelCase : Dict = step_size * torch.ones(sample.shape[0]).to(sample.device)
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
_lowerCAmelCase : List[Any] = step_size.flatten()
while len(step_size.shape) < len(sample.shape):
_lowerCAmelCase : int = step_size.unsqueeze(-1)
_lowerCAmelCase : List[Any] = sample + step_size * model_output
_lowerCAmelCase : Tuple = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__a)
def snake_case__ ( self, __a, __a, __a, ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = timesteps.to(original_samples.device)
_lowerCAmelCase : Union[str, Any] = self.discrete_sigmas.to(original_samples.device)[timesteps]
_lowerCAmelCase : Any = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(__a) * sigmas[:, None, None, None]
)
_lowerCAmelCase : int = noise + original_samples
return noisy_samples
def __len__( self):
'''simple docstring'''
return self.config.num_train_timesteps
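# Sampling sketch (hedged; `unet` and the loop structure are illustrative, and
# in the upstream diffusers API the two methods are `step_correct` and `step_pred`):
#   scheduler.set_timesteps(num_inference_steps)
#   scheduler.set_sigmas(num_inference_steps)
#   for t in scheduler.timesteps:
#       for _ in range(correct_steps):
#           sample = scheduler.step_correct(unet(sample, t).sample, sample).prev_sample
#       output = scheduler.step_pred(unet(sample, t).sample, t, sample)
#       sample, sample_mean = output.prev_sample, output.prev_sample_mean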
| 300
| 1
|
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 300
|
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def A ( _lowerCamelCase = 8 ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = ascii_letters + digits + punctuation
return "".join(secrets.choice(_lowerCamelCase ) for _ in range(_lowerCamelCase ) )
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
i -= len(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = i // 3
_lowerCAmelCase : List[Any] = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
_lowerCAmelCase : str = (
chars_incl
+ random(_lowerCamelCase , quotient + remainder )
+ random(_lowerCamelCase , _lowerCamelCase )
+ random(_lowerCamelCase , _lowerCamelCase )
)
_lowerCAmelCase : str = list(_lowerCamelCase )
shuffle(_lowerCamelCase )
return "".join(_lowerCamelCase )
# random is a generalised function for letters, characters and numbers
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
return "".join(secrets.choice(_lowerCamelCase ) for _ in range(_lowerCamelCase ) )
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
pass # Put your code here...
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
pass # Put your code here...
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
pass # Put your code here...
def A ( _lowerCamelCase , _lowerCamelCase = 8 ):
'''simple docstring'''
if len(_lowerCamelCase ) < min_length:
# Your Password must be at least 8 characters long
return False
_lowerCAmelCase : Tuple = any(char in ascii_uppercase for char in password )
_lowerCAmelCase : Tuple = any(char in ascii_lowercase for char in password )
_lowerCAmelCase : Optional[Any] = any(char in digits for char in password )
_lowerCAmelCase : Tuple = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase
# numbers, and special characters
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = int(input("Please indicate the max length of your password: " ).strip() )
_lowerCAmelCase : Tuple = input(
"Please indicate the characters that must be in your password: " ).strip()
print("Password generated:" , password_generator(_lowerCamelCase ) )
print(
"Alternative Password generated:" , alternative_password_generator(_lowerCamelCase , _lowerCamelCase ) , )
print("[If you are thinking of using this passsword, You better save it.]" )
if __name__ == "__main__":
main()
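    # Note (sketch): the strength check above only accepts passwords of at
    # least min_length characters containing an uppercase letter, a lowercase
    # letter, a digit, and a punctuation character; "Hunter2!" passes while
    # "hunter2" fails on length, case, and punctuation.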
| 300
| 1
|
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( a):
def __init__( self, *__a, **__a):
'''simple docstring'''
warnings.warn(
"The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use VideoMAEImageProcessor instead.", __a, )
super().__init__(*__a, **__a)
| 300
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_snake_case = {
"configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["ConvNextFeatureExtractor"]
_snake_case = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvNextForImageClassification",
"ConvNextModel",
"ConvNextPreTrainedModel",
"ConvNextBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TFConvNextForImageClassification",
"TFConvNextModel",
"TFConvNextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 300
| 1
|
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
assert isinstance(_lowerCamelCase , _lowerCamelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = tmp_path / "cache"
_lowerCAmelCase : List[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_lowerCAmelCase : Dict = ParquetDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase , keep_in_memory=_lowerCamelCase ).read()
_check_parquet_dataset(_lowerCamelCase , _lowerCamelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = tmp_path / "cache"
_lowerCAmelCase : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_lowerCAmelCase : List[Any] = features.copy() if features else default_expected_features
_lowerCAmelCase : Dict = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
)
_lowerCAmelCase : str = ParquetDatasetReader(_lowerCamelCase , features=_lowerCamelCase , cache_dir=_lowerCamelCase ).read()
_check_parquet_dataset(_lowerCamelCase , _lowerCamelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = tmp_path / "cache"
_lowerCAmelCase : Optional[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_lowerCAmelCase : Dict = ParquetDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase , split=_lowerCamelCase ).read()
_check_parquet_dataset(_lowerCamelCase , _lowerCamelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if issubclass(_lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : int = parquet_path
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : Any = [parquet_path]
_lowerCAmelCase : Dict = tmp_path / "cache"
_lowerCAmelCase : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_lowerCAmelCase : Any = ParquetDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase ).read()
_check_parquet_dataset(_lowerCamelCase , _lowerCamelCase )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=("train",) ):
'''simple docstring'''
assert isinstance(_lowerCamelCase , _lowerCamelCase )
for split in splits:
_lowerCAmelCase : Dict = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = tmp_path / "cache"
_lowerCAmelCase : int = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_lowerCAmelCase : List[Any] = ParquetDatasetReader(
{"train": parquet_path} , cache_dir=_lowerCamelCase , keep_in_memory=_lowerCamelCase ).read()
_check_parquet_datasetdict(_lowerCamelCase , _lowerCamelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = tmp_path / "cache"
_lowerCAmelCase : Optional[int] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_lowerCAmelCase : Union[str, Any] = features.copy() if features else default_expected_features
_lowerCAmelCase : Any = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
)
_lowerCAmelCase : Any = ParquetDatasetReader({"train": parquet_path} , features=_lowerCamelCase , cache_dir=_lowerCamelCase ).read()
_check_parquet_datasetdict(_lowerCamelCase , _lowerCamelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if split:
_lowerCAmelCase : Dict = {split: parquet_path}
else:
_lowerCAmelCase : Tuple = "train"
_lowerCAmelCase : List[str] = {"train": parquet_path, "test": parquet_path}
_lowerCAmelCase : Any = tmp_path / "cache"
_lowerCAmelCase : Dict = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_lowerCAmelCase : Tuple = ParquetDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase ).read()
_check_parquet_datasetdict(_lowerCamelCase , _lowerCamelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ParquetDatasetWriter(_lowerCamelCase , tmp_path / "foo.parquet" )
assert writer.write() > 0
_lowerCAmelCase : str = pq.ParquetFile(tmp_path / "foo.parquet" )
_lowerCAmelCase : List[Any] = pf.read()
assert dataset.data.table == output_table
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = str(shared_datadir / "test_image_rgb.jpg" )
_lowerCAmelCase : Union[str, Any] = {"image": [image_path]}
_lowerCAmelCase : List[Any] = Features({"image": Image()} )
_lowerCAmelCase : Dict = Dataset.from_dict(_lowerCamelCase , features=_lowerCamelCase )
_lowerCAmelCase : str = ParquetDatasetWriter(_lowerCamelCase , tmp_path / "foo.parquet" )
assert writer.write() > 0
_lowerCAmelCase : str = Dataset.from_parquet(str(tmp_path / "foo.parquet" ) )
assert dataset.features == reloaded_dataset.features
_lowerCAmelCase : int = ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=_lowerCamelCase ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"feature, expected" , [
(Features({"foo": Value("int32" )} ), None),
(Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
assert get_writer_batch_size(_lowerCamelCase ) == expected
| 300
|
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
_snake_case = 1.0_5457_1817e-34 # unit of ℏ : J * s
_snake_case = 3e8 # unit of c : m * s^-1
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if (force, area, distance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if force < 0:
raise ValueError("Magnitude of force can not be negative" )
if distance < 0:
raise ValueError("Distance can not be negative" )
if area < 0:
raise ValueError("Area can not be negative" )
if force == 0:
_lowerCAmelCase : Optional[int] = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
240 * (distance) ** 4
)
return {"force": force}
elif area == 0:
_lowerCAmelCase : List[str] = (240 * force * (distance) ** 4) / (
REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
)
return {"area": area}
elif distance == 0:
_lowerCAmelCase : Dict = (
(REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
) ** (1 / 4)
return {"distance": distance}
raise ValueError("One and only one argument must be 0" )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
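    # Worked example (sketch; the numbers are illustrative): with force set to
    # 0 the solver returns F = (pi**2 * hbar * c * A) / (240 * d**4); for
    # plates of area 4.0 m^2 held 0.03 m apart,
    #   (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * 4.0) / (240 * 0.03**4)
    # evaluates to roughly 6.4e-21 N.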
| 300
| 1
|
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(a) , 'Tatoeba directory does not exist.')
class UpperCAmelCase_ ( unittest.TestCase):
@cached_property
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = tempfile.mkdtemp()
return TatoebaConverter(save_dir=__a)
@slow
def snake_case__ ( self):
'''simple docstring'''
self.resolver.convert_models(["heb-eng"])
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = self.resolver.write_model_card("opus-mt-he-en", dry_run=__a)
assert mmeta["long_pair"] == "heb-eng"
| 300
|
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
_snake_case = True
from torch.cuda.amp import autocast
_snake_case = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase_ :
lowerCamelCase__ = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
lowerCamelCase__ = field(
default=a , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'Whether to log verbose messages or not.'} , )
lowerCamelCase__ = field(
default=2.0 , metadata={'help': 'Maximum temperature for gumbel softmax.'})
lowerCamelCase__ = field(
default=0.5 , metadata={'help': 'Minimum temperature for gumbel softmax.'})
lowerCamelCase__ = field(
default=0.9_9_9_9_9_5 , metadata={'help': 'Decay of gumbel temperature during training.'})
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
_lowerCAmelCase : Optional[Any] = logging.WARNING
if model_args.verbose_logging:
_lowerCAmelCase : Dict = logging.DEBUG
elif trainer_utils.is_main_process(training_args.local_rank ):
_lowerCAmelCase : str = logging.INFO
logger.setLevel(_lowerCamelCase )


@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file",
        metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}
    )


@dataclass
class DataCollatorForWav2Vec2Pretraining:
    model: Wav2Vec2ForPreTraining
    feature_extractor: Wav2Vec2FeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # reformat list to dict and set to pytorch format
        batch = self.feature_extractor.pad(
            features,
            max_length=self.max_length,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
        batch_size = batch["input_values"].shape[0]

        attention_mask = None
        # make sure that no loss is computed on padded inputs
        if batch.get("attention_mask") is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long
            )
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device
            )
            # these two operations make sure that all values
            # before the output lengths indices are attended to
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=batch["input_values"].device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()

        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length),
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=attention_mask,
            min_masks=2,
        )
        return batch
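# The collator returns a padded batch with "input_values", an "attention_mask"
# (when the feature extractor produces one), and "mask_time_indices": the boolean
# mask of time steps whose quantized targets the model must predict contrastively.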


class Wav2Vec2PreTrainer(Trainer):
    """Subclassed Trainer that decays the gumbel softmax temperature after every training step."""

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )

        return loss.detach()
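# Temperature schedule applied above: temp(step) = max(max_gumbel_temp * gumbel_temp_decay**step,
# min_gumbel_temp), i.e. an exponential decay from max_gumbel_temp floored at min_gumbel_temp.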


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]",
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name,
            split="validation", cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name,
            split=f"{data_args.train_split_name}", cache_dir=model_args.cache_dir,
        )

    # only normalized-inputs-training is supported
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["train"].column_names,
    )

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = Wav2Vec2Config.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing,
    )

    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'``"
        )

    model = Wav2Vec2ForPreTraining(config)

    data_collator = DataCollatorForWav2Vec2Pretraining(model=model, feature_extractor=feature_extractor)

    trainer = Wav2Vec2PreTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets["train"],
        eval_dataset=vectorized_datasets["validation"],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()
if __name__ == "__main__":
main()
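# Example launch (a sketch only; paths and dataset names are placeholders, and the
# config behind --model_name_or_path must have do_stable_layer_norm=True and
# feat_extract_norm="layer", or the script raises the ValueError above):
#
#   python run_wav2vec2_pretraining.py \
#       --model_name_or_path="./wav2vec2-layer-norm-config" \
#       --dataset_name="librispeech_asr" \
#       --dataset_config_name="clean" \
#       --train_split_name="train.100" \
#       --output_dir="./wav2vec2-pretrained" \
#       --do_train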
| 300
| 1
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
_snake_case = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
RAG_CONFIG_DOC = _snake_case


@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True
    def __init__(
        self,
        vocab_size=None, is_encoder_decoder=True, prefix=None, bos_token_id=None, pad_token_id=None,
        eos_token_id=None, decoder_start_token_id=None, title_sep=" / ", doc_sep=" // ", n_docs=5,
        max_combined_length=300, retrieval_vector_size=768, retrieval_batch_size=8, dataset="wiki_dpr",
        dataset_split="train", index_name="compressed", index_path=None, passages_path=None,
        use_dummy_dataset=False, reduce_loss=False, label_smoothing=0.0, do_deduplication=True,
        exclude_bos_score=False, do_marginalize=False, output_retrieved=False, use_cache=True,
        forced_eos_token_id=None, **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder, prefix=prefix, vocab_size=vocab_size, **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)
    @classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
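# Minimal construction sketch (assumes a working transformers install; the checkpoint
# names are illustrative and can be any DPR-style question encoder plus a seq2seq
# generator):
#
#   from transformers import AutoConfig
#
#   question_encoder_config = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#   generator_config = AutoConfig.from_pretrained("facebook/bart-large")
#   rag_config = RagConfig.from_question_encoder_generator_configs(
#       question_encoder_config, generator_config, index_name="exact", use_dummy_dataset=True
#   )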
| 300
|
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    """
    Take a product name as input and return a DataFrame of product information
    scraped from the Amazon search results page.
    """
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": (
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36"
            " (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36"
        ),
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span", attrs={"class": "a-price a-text-price"}
                    ).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        # blank out rows where price or rating could not be parsed
        data_frame.loc[data_frame["Current Price of the product"] == "", :] = " "
        data_frame.loc[data_frame["Product Rating"] == "", :] = " "
    data_frame.index += 1
    return data_frame
if __name__ == "__main__":
    product = "headphones"
get_amazon_product_data(product).to_csv(f'''Amazon Product Data for {product}.csv''')
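# Note: the CSS class names used above ("s-result-item", "a-offscreen", "a-icon-alt",
# "a-price a-text-price") mirror Amazon's markup at the time of writing; the page
# layout changes often, so an empty DataFrame usually means the selectors need updating.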
| 300
| 1
|